diff --git a/go.mod b/go.mod index b8966751355dc..28ccc692edd26 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 + github.com/cortexproject/cortex v1.2.1-0.20200803161316-7014ff11ed70 github.com/davecgh/go-spew v1.1.1 github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect @@ -57,7 +57,7 @@ require ( go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 go.uber.org/atomic v1.6.0 golang.org/x/net v0.0.0-20200707034311-ab3426394381 - google.golang.org/grpc v1.29.1 + google.golang.org/grpc v1.30.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/yaml.v2 v2.3.0 @@ -75,3 +75,6 @@ replace github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 // Use fork of gocql that has gokit logs and Prometheus metrics. replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 + +// Same as Cortex, we can't upgrade to grpc 1.30.0 until go.etcd.io/etcd will support it. +replace google.golang.org/grpc => google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index 191a1a866d428..b445f04724a16 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -229,7 +228,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= @@ -254,8 +252,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 h1:KseIJ2j4OJ8Vt9B2dpUyAgqgoeoRtFxLydxabmTToDg= -github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2/go.mod h1:zBfkUqePbDsIbPaClWi31N3wC93h76vu0ONPNYQitCs= +github.com/cortexproject/cortex v1.2.1-0.20200803161316-7014ff11ed70 h1:bb36PT92p0jXS/8a0ftfudnD9qle3hnSInV2Z9E9Wx8= +github.com/cortexproject/cortex v1.2.1-0.20200803161316-7014ff11ed70/go.mod 
h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= @@ -323,9 +321,6 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -837,7 +832,6 @@ github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0U github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1096,7 +1090,7 @@ github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rM github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= -github.com/prometheus/prometheus v1.8.2-0.20200619100132-74207c04655e/go.mod h1:QV6T0PPQi5UFmqcLBJw3JiyIR8r1O7KEv9qlVw4VV40= +github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc= github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32 h1:GcJMaFu1uu6rSueToTRZuVS3AiORbFtLEDMUfp4GA9Q= github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32/go.mod h1:+/y4DzJ62qmhy0o/H4PtXegRXw+80E8RVRHhLbv+bkM= github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 h1:aRBuOcI/bN5f/UqmIGn8CajY6W0mPBEajK8q+SFgNZY= @@ -1198,8 +1192,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1 
h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= -github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96 h1:McsluZ8fXVwGbdXsZ20uZNGukmPycDU9m6df64S2bqQ= -github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96/go.mod h1:VuNcGvUE0u57S1XXqYhf0dQzUO3wUnw2B5IKsju+1z4= +github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf h1:yq9nWz5Iv6ejE9d/fToxgcVDk8iuAcpvrWfsHsNySxU= +github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -1349,7 +1343,6 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1373,7 +1366,6 @@ golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1442,7 +1434,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2By golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ 
-1505,7 +1496,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= @@ -1530,11 +1520,9 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjTo golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1622,7 +1610,6 @@ google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08= google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1666,25 +1653,6 @@ google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpd google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4= google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.18.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1743,7 +1711,6 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index bb546d1309b4e..b87eef9f519ce 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -55,7 +55,8 @@ func NewTripperware( shardingMetrics := logql.NewShardingMetrics(registerer) splitByMetrics := NewSplitByMetrics(registerer) - metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, schema, minShardingLookback, lokiCodec, PrometheusExtractor{}, instrumentMetrics, retryMetrics, shardingMetrics, splitByMetrics) + metricsTripperware, cache, err := NewMetricTripperware(cfg, log, limits, schema, minShardingLookback, lokiCodec, + PrometheusExtractor{}, instrumentMetrics, retryMetrics, shardingMetrics, splitByMetrics, registerer) if err != nil { return nil, nil, err } @@ -304,6 +305,7 @@ func NewMetricTripperware( retryMiddlewareMetrics *queryrange.RetryMiddlewareMetrics, shardingMetrics *logql.ShardingMetrics, splitByMetrics *SplitByMetrics, + registerer 
prometheus.Registerer, ) (frontend.Tripperware, Stopper, error) { queryRangeMiddleware := []queryrange.Middleware{StatsCollectorMiddleware(), queryrange.LimitsMiddleware(limits)} if cfg.AlignQueriesWithStep { @@ -335,6 +337,7 @@ func NewMetricTripperware( codec, extractor, nil, + registerer, ) if err != nil { return nil, nil, err diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 43b4f8bffb49a..7b5086ba3f1e0 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -11,6 +11,7 @@ import ( cortex_local "github.com/cortexproject/cortex/pkg/chunk/local" "github.com/cortexproject/cortex/pkg/chunk/storage" "github.com/cortexproject/cortex/pkg/querier/astmapper" + pkg_util "github.com/cortexproject/cortex/pkg/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -80,7 +81,7 @@ type store struct { // NewStore creates a new Loki Store using configuration supplied. func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg SchemaConfig, limits storage.StoreLimits, registerer prometheus.Registerer) (Store, error) { - s, err := storage.NewStore(cfg.Config, storeCfg, schemaCfg.SchemaConfig, limits, registerer, nil) + s, err := storage.NewStore(cfg.Config, storeCfg, schemaCfg.SchemaConfig, limits, registerer, nil, pkg_util.Logger) if err != nil { return nil, err } diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 15771cd6255ea..9e2b267502773 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + pkg_util "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/ingester/client" @@ -199,7 +201,7 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, refs = append(refs, r) } - cache, err := cache.New(cache.Config{Prefix: "chunks"}) + cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, pkg_util.Logger) if err != nil { panic(err) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go index 4238dcad5d326..45db017daf4b9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go @@ -269,7 +269,7 @@ func (a *API) RegisterQuerier( ) http.Handler { api := v1.NewAPI( engine, - queryable, + errorTranslateQueryable{queryable}, // Translate errors to errors expected by API. 
func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} }, func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} }, func() config.Config { return config.Config{} }, diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go new file mode 100644 index 0000000000000..8682872a47e10 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go @@ -0,0 +1,158 @@ +package api + +import ( + "context" + + "github.com/gogo/status" + "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + + "github.com/cortexproject/cortex/pkg/chunk" +) + +func translateError(err error) error { + if err == nil { + return err + } + + // vendor/github.com/prometheus/prometheus/web/api/v1/api.go, respondError function only accepts + // *apiError types. + // Translation of error to *apiError happens in vendor/github.com/prometheus/prometheus/web/api/v1/api.go, returnAPIError method. + // It only supports: + // promql.ErrQueryCanceled, mapped to 503 + // promql.ErrQueryTimeout, mapped to 503 + // promql.ErrStorage mapped to 500 + // anything else is mapped to 422 + + switch errors.Cause(err).(type) { + case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout: + // Don't translate those, just in case we use them internally. + return err + case chunk.QueryError: + // This will be returned with status code 422 by Prometheus API. + return err + default: + if errors.Is(err, context.Canceled) { + return err // 422 + } + + s, ok := status.FromError(err) + if ok { + code := s.Code() + + // Treat these as HTTP status codes, even though they are supposed to be grpc codes. + if code >= 400 && code < 500 { + // Return directly, will be mapped to 422 + return err + } else if code >= 500 && code < 599 { + // Wrap into ErrStorage for mapping to 500 + return promql.ErrStorage{Err: err} + } + } + + // All other errors will be returned as 500. + return promql.ErrStorage{Err: err} + } +} + +type errorTranslateQueryable struct { + q storage.SampleAndChunkQueryable +} + +func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + q, err := e.q.Querier(ctx, mint, maxt) + return errorTranslateQuerier{q: q}, translateError(err) +} + +func (e errorTranslateQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { + q, err := e.q.ChunkQuerier(ctx, mint, maxt) + return errorTranslateChunkQuerier{q: q}, translateError(err) +} + +type errorTranslateQuerier struct { + q storage.Querier +} + +func (e errorTranslateQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + values, warnings, err := e.q.LabelValues(name) + return values, warnings, translateError(err) +} + +func (e errorTranslateQuerier) LabelNames() ([]string, storage.Warnings, error) { + values, warnings, err := e.q.LabelNames() + return values, warnings, translateError(err) +} + +func (e errorTranslateQuerier) Close() error { + return translateError(e.q.Close()) +} + +func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + s := e.q.Select(sortSeries, hints, matchers...) 
+ return errorTranslateSeriesSet{s} +} + +type errorTranslateChunkQuerier struct { + q storage.ChunkQuerier +} + +func (e errorTranslateChunkQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + values, warnings, err := e.q.LabelValues(name) + return values, warnings, translateError(err) +} + +func (e errorTranslateChunkQuerier) LabelNames() ([]string, storage.Warnings, error) { + values, warnings, err := e.q.LabelNames() + return values, warnings, translateError(err) +} + +func (e errorTranslateChunkQuerier) Close() error { + return translateError(e.q.Close()) +} + +func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet { + s := e.q.Select(sortSeries, hints, matchers...) + return errorTranslateChunkSeriesSet{s} +} + +type errorTranslateSeriesSet struct { + s storage.SeriesSet +} + +func (e errorTranslateSeriesSet) Next() bool { + return e.s.Next() +} + +func (e errorTranslateSeriesSet) At() storage.Series { + return e.s.At() +} + +func (e errorTranslateSeriesSet) Err() error { + return translateError(e.s.Err()) +} + +func (e errorTranslateSeriesSet) Warnings() storage.Warnings { + return e.s.Warnings() +} + +type errorTranslateChunkSeriesSet struct { + s storage.ChunkSeriesSet +} + +func (e errorTranslateChunkSeriesSet) Next() bool { + return e.s.Next() +} + +func (e errorTranslateChunkSeriesSet) At() storage.ChunkSeries { + return e.s.At() +} + +func (e errorTranslateChunkSeriesSet) Err() error { + return translateError(e.s.Err()) +} + +func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings { + return e.s.Warnings() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go index 994c49b745239..bfdfb748d894b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go @@ -11,19 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" ) -var ( - droppedWriteBack = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "cache_dropped_background_writes_total", - Help: "Total count of dropped write backs to cache.", - }, []string{"name"}) - queueLength = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "cache_background_queue_length", - Help: "Length of the cache background write queue.", - }, []string{"name"}) -) - // BackgroundConfig is config for a Background Cache. type BackgroundConfig struct { WriteBackGoroutines int `yaml:"writeback_goroutines"` @@ -54,14 +41,25 @@ type backgroundWrite struct { } // NewBackground returns a new Cache that does stores on background goroutines. 
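Note: the queryable.go file added above wraps the queryable handed to the Prometheus v1 API so that storage and gRPC errors arrive as types the API knows how to map to HTTP status codes. A small, hypothetical sketch of the two mechanisms translateError relies on (the error text is illustrative, not from the diff):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
	"github.com/prometheus/prometheus/promql"
)

func main() {
	// translateError uses errors.Cause to look through wrapping layers
	// before deciding how to classify an error.
	cause := errors.New("bigtable: backend unavailable")
	wrapped := errors.Wrap(cause, "error fetching chunks")
	fmt.Println(errors.Cause(wrapped) == cause) // true

	// Server-side failures end up wrapped in promql.ErrStorage, which the
	// Prometheus v1 API maps to a 500 rather than the default 422.
	var apiErr error = promql.ErrStorage{Err: wrapped}
	fmt.Println(apiErr)
}
```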
-func NewBackground(name string, cfg BackgroundConfig, cache Cache) Cache { +func NewBackground(name string, cfg BackgroundConfig, cache Cache, reg prometheus.Registerer) Cache { c := &backgroundCache{ - Cache: cache, - quit: make(chan struct{}), - bgWrites: make(chan backgroundWrite, cfg.WriteBackBuffer), - name: name, - droppedWriteBack: droppedWriteBack.WithLabelValues(name), - queueLength: queueLength.WithLabelValues(name), + Cache: cache, + quit: make(chan struct{}), + bgWrites: make(chan backgroundWrite, cfg.WriteBackBuffer), + name: name, + droppedWriteBack: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "cache_dropped_background_writes_total", + Help: "Total count of dropped write backs to cache.", + ConstLabels: prometheus.Labels{"name": name}, + }), + + queueLength: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "cache_background_queue_length", + Help: "Length of the cache background write queue.", + ConstLabels: prometheus.Labels{"name": name}, + }), } c.wg.Add(cfg.WriteBackGoroutines) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index e400e88a32b27..dbbc6b2e8c4fa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -6,6 +6,7 @@ import ( "flag" "time" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" ) @@ -60,7 +61,7 @@ func (cfg *Config) Validate() error { } // New creates a new Cache using Config. -func New(cfg Config) (Cache, error) { +func New(cfg Config, reg prometheus.Registerer, logger log.Logger) (Cache, error) { if cfg.Cache != nil { return cfg.Cache, nil } @@ -72,8 +73,8 @@ func New(cfg Config) (Cache, error) { cfg.Fifocache.Validity = cfg.DefaultValidity } - if cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache); cache != nil { - caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache)) + if cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache, reg, logger); cache != nil { + caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache, reg)) } } @@ -86,11 +87,11 @@ func New(cfg Config) (Cache, error) { cfg.Memcache.Expiration = cfg.DefaultValidity } - client := NewMemcachedClient(cfg.MemcacheClient, cfg.Prefix, prometheus.DefaultRegisterer) - cache := NewMemcached(cfg.Memcache, client, cfg.Prefix) + client := NewMemcachedClient(cfg.MemcacheClient, cfg.Prefix, reg, logger) + cache := NewMemcached(cfg.Memcache, client, cfg.Prefix, reg, logger) cacheName := cfg.Prefix + "memcache" - caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache))) + caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache, reg), reg)) } if cfg.Redis.Endpoint != "" { @@ -98,13 +99,13 @@ func New(cfg Config) (Cache, error) { cfg.Redis.Expiration = cfg.DefaultValidity } cacheName := cfg.Prefix + "redis" - cache := NewRedisCache(cfg.Redis, cacheName, nil) - caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache))) + cache := NewRedisCache(cfg.Redis, cacheName, nil, logger) + caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache, reg), reg)) } cache := NewTiered(caches) if len(caches) > 1 { - cache = Instrument(cfg.Prefix+"tiered", cache) + cache = Instrument(cfg.Prefix+"tiered", cache, reg) } return cache, nil } diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index ca331de77e68d..81432d1a1e5b7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -9,6 +9,7 @@ import ( "unsafe" "github.com/dustin/go-humanize" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -18,64 +19,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/flagext" ) -var ( - cacheEntriesAdded = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "added_total", - Help: "The total number of Put calls on the cache", - }, []string{"cache"}) - - cacheEntriesAddedNew = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "added_new_total", - Help: "The total number of new entries added to the cache", - }, []string{"cache"}) - - cacheEntriesEvicted = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "evicted_total", - Help: "The total number of evicted entries", - }, []string{"cache"}) - - cacheEntriesCurrent = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "entries", - Help: "The total number of entries", - }, []string{"cache"}) - - cacheTotalGets = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "gets_total", - Help: "The total number of Get calls", - }, []string{"cache"}) - - cacheTotalMisses = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "misses_total", - Help: "The total number of Get calls that had no valid entry", - }, []string{"cache"}) - - cacheStaleGets = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "stale_gets_total", - Help: "The total number of Get calls that had an entry which expired", - }, []string{"cache"}) - - cacheMemoryBytes = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "querier", - Subsystem: "cache", - Name: "memory_bytes", - Help: "The current cache size in bytes", - }, []string{"cache"}) -) - const ( elementSize = int(unsafe.Sizeof(list.Element{})) elementPrtSize = int(unsafe.Sizeof(&list.Element{})) @@ -149,20 +92,19 @@ type cacheEntry struct { } // NewFifoCache returns a new initialised FifoCache of size. -// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing. 
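Note: the recurring pattern in these cache changes is replacing package-level metric vectors with per-instance metrics: each constructor now takes a prometheus.Registerer, builds its metrics with promauto.With, and carries the cache name as a constant label instead of a WithLabelValues dimension. A minimal sketch of that shape, reusing the queue-length gauge from background.go (the cache name is illustrative):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newQueueLength builds the metric against whatever registerer the caller
// supplies (a nil registerer skips registration), with the per-cache name
// attached as a constant label.
func newQueueLength(reg prometheus.Registerer, name string) prometheus.Gauge {
	return promauto.With(reg).NewGauge(prometheus.GaugeOpts{
		Namespace:   "cortex",
		Name:        "cache_background_queue_length",
		Help:        "Length of the cache background write queue.",
		ConstLabels: prometheus.Labels{"name": name},
	})
}

func main() {
	reg := prometheus.NewRegistry()
	newQueueLength(reg, "chunksmemcache").Set(0)
}
```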
-func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { +func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, logger log.Logger) *FifoCache { util.WarnExperimentalUse("In-memory (FIFO) cache") if cfg.DeprecatedSize > 0 { flagext.DeprecatedFlagsUsed.Inc() - level.Warn(util.Logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name) + level.Warn(logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name) cfg.MaxSizeItems = cfg.DeprecatedSize } maxSizeBytes, _ := parsebytes(cfg.MaxSizeBytes) if maxSizeBytes == 0 && cfg.MaxSizeItems == 0 { // zero cache capacity - no need to create cache - level.Warn(util.Logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name) + level.Warn(logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name) return nil } return &FifoCache{ @@ -172,15 +114,69 @@ func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { entries: make(map[string]*list.Element), lru: list.New(), - // TODO(bwplotka): There might be simple cache.Cache wrapper for those. - entriesAdded: cacheEntriesAdded.WithLabelValues(name), - entriesAddedNew: cacheEntriesAddedNew.WithLabelValues(name), - entriesEvicted: cacheEntriesEvicted.WithLabelValues(name), - entriesCurrent: cacheEntriesCurrent.WithLabelValues(name), - totalGets: cacheTotalGets.WithLabelValues(name), - totalMisses: cacheTotalMisses.WithLabelValues(name), - staleGets: cacheStaleGets.WithLabelValues(name), - memoryBytes: cacheMemoryBytes.WithLabelValues(name), + entriesAdded: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "added_total", + Help: "The total number of Put calls on the cache", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + entriesAddedNew: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "added_new_total", + Help: "The total number of new entries added to the cache", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + entriesEvicted: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "evicted_total", + Help: "The total number of evicted entries", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + entriesCurrent: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "entries", + Help: "The total number of entries", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + totalGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "gets_total", + Help: "The total number of Get calls", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + totalMisses: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "misses_total", + Help: "The total number of Get calls that had no valid entry", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + staleGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "stale_gets_total", + Help: "The total number of Get calls that had an entry which expired", + ConstLabels: prometheus.Labels{"cache": name}, + }), + + memoryBytes: 
promauto.With(reg).NewGauge(prometheus.GaugeOpts{ + Namespace: "querier", + Subsystem: "cache", + Name: "memory_bytes", + Help: "The current cache size in bytes", + ConstLabels: prometheus.Labels{"cache": name}, + }), } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go index c5c43b21cec18..ca27d4a3b4e40 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go @@ -6,58 +6,52 @@ import ( ot "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" instr "github.com/weaveworks/common/instrument" ) -var ( - requestDuration = instr.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "cache_request_duration_seconds", - Help: "Total time spent in seconds doing cache requests.", - // Cache requests are very quick: smallest bucket is 16us, biggest is 1s. - Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), - }, []string{"method", "status_code"})) - - fetchedKeys = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "cache_fetched_keys", - Help: "Total count of keys requested from cache.", - }, []string{"name"}) - - hits = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "cache_hits", - Help: "Total count of keys found in cache.", - }, []string{"name"}) - - valueSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{ +// Instrument returns an instrumented cache. +func Instrument(name string, cache Cache, reg prometheus.Registerer) Cache { + valueSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "cache_value_size_bytes", Help: "Size of values in the cache.", // Cached chunks are generally in the KBs, but cached index can // get big. Histogram goes from 1KB to 4MB. // 1024 * 4^(7-1) = 4MB - Buckets: prometheus.ExponentialBuckets(1024, 4, 7), - }, []string{"name", "method"}) -) + Buckets: prometheus.ExponentialBuckets(1024, 4, 7), + ConstLabels: prometheus.Labels{"name": name}, + }, []string{"method"}) -func init() { - requestDuration.Register() - prometheus.MustRegister(fetchedKeys) - prometheus.MustRegister(hits) - prometheus.MustRegister(valueSize) -} - -// Instrument returns an instrumented cache. -func Instrument(name string, cache Cache) Cache { return &instrumentedCache{ name: name, Cache: cache, - fetchedKeys: fetchedKeys.WithLabelValues(name), - hits: hits.WithLabelValues(name), - storedValueSize: valueSize.WithLabelValues(name, "store"), - fetchedValueSize: valueSize.WithLabelValues(name, "fetch"), + requestDuration: instr.NewHistogramCollector(promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "cache_request_duration_seconds", + Help: "Total time spent in seconds doing cache requests.", + // Cache requests are very quick: smallest bucket is 16us, biggest is 1s. 
+ Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), + ConstLabels: prometheus.Labels{"name": name}, + }, []string{"method", "status_code"})), + + fetchedKeys: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "cache_fetched_keys", + Help: "Total count of keys requested from cache.", + ConstLabels: prometheus.Labels{"name": name}, + }), + + hits: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "cache_hits", + Help: "Total count of keys found in cache.", + ConstLabels: prometheus.Labels{"name": name}, + }), + + storedValueSize: valueSize.WithLabelValues("store"), + fetchedValueSize: valueSize.WithLabelValues("fetch"), } } @@ -67,6 +61,7 @@ type instrumentedCache struct { fetchedKeys, hits prometheus.Counter storedValueSize, fetchedValueSize prometheus.Observer + requestDuration *instr.HistogramCollector } func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]byte) { @@ -75,7 +70,7 @@ func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]b } method := i.name + ".store" - _ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { + _ = instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys", len(keys))) i.Cache.Store(ctx, keys, bufs) @@ -91,7 +86,7 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string, method = i.name + ".fetch" ) - _ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error { + _ = instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error { sp := ot.SpanFromContext(ctx) sp.LogFields(otlog.Int("keys requested", len(keys))) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go index 0b14180e11f48..c2101e6916836 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go @@ -9,6 +9,7 @@ import ( "time" "github.com/bradfitz/gomemcache/memcache" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" opentracing "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" @@ -19,16 +20,6 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -var ( - memcacheRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "memcache_request_duration_seconds", - Help: "Total time spent in seconds doing memcache requests.", - // Memecache requests are very quick: smallest bucket is 16us, biggest is 1s - Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), - }, []string{"method", "status_code", "name"}) -) - type observableVecCollector struct { v prometheus.ObserverVec } @@ -64,20 +55,26 @@ type Memcached struct { wg sync.WaitGroup inputCh chan *work + + logger log.Logger } // NewMemcached makes a new Memcache. -// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing. -// TODO(bwplotka): Remove globals & util packages from cache package entirely (e.g util.Logger). 
-func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string) *Memcached { +func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string, reg prometheus.Registerer, logger log.Logger) *Memcached { c := &Memcached{ cfg: cfg, memcache: client, name: name, + logger: logger, requestDuration: observableVecCollector{ - v: memcacheRequestDuration.MustCurryWith(prometheus.Labels{ - "name": name, - }), + v: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "cortex", + Name: "memcache_request_duration_seconds", + Help: "Total time spent in seconds doing memcache requests.", + // Memecache requests are very quick: smallest bucket is 16us, biggest is 1s + Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8), + ConstLabels: prometheus.Labels{"name": name}, + }, []string{"method", "status_code"}), }, } @@ -161,7 +158,7 @@ func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, b // Memcached returns partial results even on error. if err != nil { sp.LogFields(otlog.Error(err)) - level.Error(util.Logger).Log("msg", "Failed to get keys from memcached", "err", err) + level.Error(c.logger).Log("msg", "Failed to get keys from memcached", "err", err) } return err }) @@ -234,7 +231,7 @@ func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) { return c.memcache.Set(&item) }) if err != nil { - level.Error(util.Logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err) + level.Error(c.logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err) } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go index df0969dc78f3b..6a0b52a0ff5cd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go @@ -11,6 +11,7 @@ import ( "time" "github.com/bradfitz/gomemcache/memcache" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -20,14 +21,6 @@ import ( "github.com/cortexproject/cortex/pkg/util" ) -var ( - memcacheServersDiscovered = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: "cortex", - Name: "memcache_client_servers", - Help: "The number of memcache servers discovered.", - }, []string{"name"}) -) - // MemcachedClient interface exists for mocking memcacheClient. type MemcachedClient interface { GetMulti(keys []string) (map[string]*memcache.Item, error) @@ -55,6 +48,8 @@ type memcachedClient struct { wait sync.WaitGroup numServers prometheus.Gauge + + logger log.Logger } // MemcachedClientConfig defines how a MemcachedClient should be constructed. @@ -81,7 +76,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description st // NewMemcachedClient creates a new MemcacheClient that gets its server list // from SRV and updates the server list on a regular basis. 
-func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer) MemcachedClient { +func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer, logger log.Logger) MemcachedClient { var selector serverSelector if cfg.ConsistentHash { selector = &MemcachedJumpHashSelector{} @@ -102,10 +97,16 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg serverList: selector, hostname: cfg.Host, service: cfg.Service, - provider: dns.NewProvider(util.Logger, dnsProviderRegisterer, dns.GolangResolverType), + logger: logger, + provider: dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType), quit: make(chan struct{}), - numServers: memcacheServersDiscovered.WithLabelValues(name), + numServers: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "memcache_client_servers", + Help: "The number of memcache servers discovered.", + ConstLabels: prometheus.Labels{"name": name}, + }), } if len(cfg.Addresses) > 0 { @@ -115,7 +116,7 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg err := newClient.updateMemcacheServers() if err != nil { - level.Error(util.Logger).Log("msg", "error setting memcache servers to host", "host", cfg.Host, "err", err) + level.Error(logger).Log("msg", "error setting memcache servers to host", "host", cfg.Host, "err", err) } newClient.wait.Add(1) @@ -153,7 +154,7 @@ func (c *memcachedClient) updateLoop(updateInterval time.Duration) { case <-ticker.C: err := c.updateMemcacheServers() if err != nil { - level.Warn(util.Logger).Log("msg", "error updating memcache servers", "err", err) + level.Warn(c.logger).Log("msg", "error updating memcache servers", "err", err) } case <-c.quit: ticker.Stop() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go index fac33bb4589b5..382290e30ba86 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -5,6 +5,7 @@ import ( "flag" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gomodule/redigo/redis" @@ -18,6 +19,7 @@ type RedisCache struct { expiration int timeout time.Duration pool *redis.Pool + logger log.Logger } // RedisConfig defines how a RedisCache should be constructed. 
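Note: alongside the registerer, these constructors now take an explicit go-kit logger instead of logging through the util.Logger global, as the redis_cache.go hunks below show. A minimal usage sketch of the new NewRedisCache signature (the endpoint and cache name are illustrative):

```go
package main

import (
	"github.com/cortexproject/cortex/pkg/chunk/cache"
	"github.com/go-kit/kit/log"
)

func main() {
	logger := log.NewNopLogger()

	// A nil pool makes the constructor build one from the config; connection
	// errors are now reported through the injected logger rather than a global.
	c := cache.NewRedisCache(cache.RedisConfig{Endpoint: "localhost:6379"}, "test-redis", nil, logger)
	defer c.Stop()
}
```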
@@ -49,7 +51,7 @@ func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *f } // NewRedisCache creates a new RedisCache -func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache { +func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool, logger log.Logger) *RedisCache { util.WarnExperimentalUse("Redis cache") // pool != nil only in unit tests if pool == nil { @@ -82,10 +84,11 @@ func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache { timeout: cfg.Timeout, name: name, pool: pool, + logger: logger, } if err := cache.ping(context.Background()); err != nil { - level.Error(util.Logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err) + level.Error(logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err) } return cache @@ -96,7 +99,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, data, err := c.mget(ctx, keys) if err != nil { - level.Error(util.Logger).Log("msg", "failed to get from redis", "name", c.name, "err", err) + level.Error(c.logger).Log("msg", "failed to get from redis", "name", c.name, "err", err) missed = make([]string, len(keys)) copy(missed, keys) return @@ -116,7 +119,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, func (c *RedisCache) Store(ctx context.Context, keys []string, bufs [][]byte) { err := c.mset(ctx, keys, bufs, c.expiration) if err != nil { - level.Error(util.Logger).Log("msg", "failed to put to redis", "name", c.name, "err", err) + level.Error(c.logger).Log("msg", "failed to put to redis", "name", c.name, "err", err) } } @@ -126,7 +129,7 @@ func (c *RedisCache) Stop() { } // mset adds key-value pairs to the cache. -func (c *RedisCache) mset(ctx context.Context, keys []string, bufs [][]byte, ttl int) error { +func (c *RedisCache) mset(_ context.Context, keys []string, bufs [][]byte, ttl int) error { conn := c.pool.Get() defer conn.Close() @@ -143,7 +146,7 @@ func (c *RedisCache) mset(ctx context.Context, keys []string, bufs [][]byte, ttl } // mget retrieves values from the cache. -func (c *RedisCache) mget(ctx context.Context, keys []string) ([][]byte, error) { +func (c *RedisCache) mget(_ context.Context, keys []string) ([][]byte, error) { intf := make([]interface{}, len(keys)) for i, key := range keys { intf[i] = key @@ -155,7 +158,7 @@ func (c *RedisCache) mget(ctx context.Context, keys []string) ([][]byte, error) return redis.ByteSlices(redis.DoWithTimeout(conn, c.timeout, "MGET", intf...)) } -func (c *RedisCache) ping(ctx context.Context) error { +func (c *RedisCache) ping(_ context.Context) error { conn := c.pool.Get() defer conn.Close() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go index 2fc2308f4844f..d2ee606eda279 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go @@ -3,20 +3,21 @@ package cache import ( "context" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/golang/snappy" - - "github.com/cortexproject/cortex/pkg/util" ) type snappyCache struct { - next Cache + next Cache + logger log.Logger } // NewSnappy makes a new snappy encoding cache wrapper. 
-func NewSnappy(next Cache) Cache { +func NewSnappy(next Cache, logger log.Logger) Cache { return &snappyCache{ - next: next, + next: next, + logger: logger, } } @@ -35,7 +36,7 @@ func (s *snappyCache) Fetch(ctx context.Context, keys []string) ([]string, [][]b for _, buf := range bufs { d, err := snappy.Decode(nil, buf) if err != nil { - level.Error(util.Logger).Log("msg", "failed to decode cache entry", "err", err) + level.Error(s.logger).Log("msg", "failed to decode cache entry", "err", err) return nil, nil, keys } ds = append(ds, d) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index c18a38a167252..061a9b1c638a1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -162,6 +162,7 @@ func (c *Fetcher) FetchChunks(ctx context.Context, chunks []Chunk, keys []string } if err != nil { + // Don't rely on Cortex error translation here. return nil, promql.ErrStorage{Err: err} } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index f5822bdc5a7e9..434bb40c75422 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -11,9 +11,9 @@ import ( "time" "cloud.google.com/go/bigtable" + "github.com/go-kit/kit/log" ot "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" - "github.com/pkg/errors" "github.com/cortexproject/cortex/pkg/chunk" @@ -54,6 +54,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.GRPCClientConfig.RegisterFlagsWithPrefix("bigtable", f) } +func (cfg *Config) Validate(log log.Logger) error { + return cfg.GRPCClientConfig.Validate(log) +} + // storageClientColumnKey implements chunk.storageClient for GCP. type storageClientColumnKey struct { cfg Config diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go index 0ef1a401a1451..5ac25651ea8f0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go @@ -271,8 +271,14 @@ func (b *BoltIndexClient) QueryDB(ctx context.Context, db *bbolt.DB, query chunk break } - batch.rangeValue = k[len(rowPrefix):] - batch.value = v + // make a copy since k, v are only valid for the life of the transaction. 
+ // See: https://godoc.org/github.com/boltdb/bolt#Cursor.Seek + batch.rangeValue = make([]byte, len(k)-len(rowPrefix)) + copy(batch.rangeValue, k[len(rowPrefix):]) + + batch.value = make([]byte, len(v)) + copy(batch.value, v) + if !callback(query, &batch) { break } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 612c12c5f97c9..405dac5359e15 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -25,9 +25,10 @@ const ( ) var ( - errInvalidSchemaVersion = errors.New("invalid schema version") - errInvalidTablePeriod = errors.New("the table period must be a multiple of 24h (1h for schema v1)") - errConfigFileNotSet = errors.New("schema config file needs to be set") + errInvalidSchemaVersion = errors.New("invalid schema version") + errInvalidTablePeriod = errors.New("the table period must be a multiple of 24h (1h for schema v1)") + errConfigFileNotSet = errors.New("schema config file needs to be set") + errConfigChunkPrefixNotSet = errors.New("schema config for chunks is missing the 'prefix' setting") ) // PeriodConfig defines the schema and tables to use for a period of time @@ -148,6 +149,22 @@ func (cfg *SchemaConfig) ForEachAfter(t model.Time, f func(config *PeriodConfig) } } +func validateChunks(cfg PeriodConfig) error { + objectStore := cfg.IndexType + if cfg.ObjectType != "" { + objectStore = cfg.ObjectType + } + switch objectStore { + case "cassandra", "aws-dynamo", "bigtable-hashed", "gcp", "gcp-columnkey", "bigtable", "grpc-store": + if cfg.ChunkTables.Prefix == "" { + return errConfigChunkPrefixNotSet + } + return nil + default: + return nil + } +} + // CreateSchema returns the schema defined by the PeriodConfig func (cfg PeriodConfig) CreateSchema() (BaseSchema, error) { buckets, bucketsPeriod := cfg.createBucketsFunc() @@ -209,6 +226,11 @@ func (cfg *PeriodConfig) applyDefaults() { // Validate the period config. 
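Note: the boltdb_index_client.go change above copies k and v out of the cursor because bbolt byte slices are only valid for the lifetime of the transaction; retaining them afterwards reads reused memory. A minimal sketch of the pitfall and the fix (the database path and bucket name are illustrative):

```go
package main

import (
	"fmt"

	"go.etcd.io/bbolt"
)

func main() {
	db, err := bbolt.Open("example.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var saved []byte
	err = db.View(func(tx *bbolt.Tx) error {
		b := tx.Bucket([]byte("index"))
		if b == nil {
			return nil
		}
		_, v := b.Cursor().First()
		// v aliases the transaction's mmap'd pages; holding on to it after
		// View returns is unsafe. Copy first, exactly as QueryDB now does
		// for rangeValue and value.
		saved = make([]byte, len(v))
		copy(saved, v)
		return nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(saved))
}
```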
func (cfg PeriodConfig) validate() error { + validateError := validateChunks(cfg) + if validateError != nil { + return validateError + } + _, err := cfg.CreateSchema() return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go index ee032300b8089..6c14f4962852c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go @@ -1,9 +1,12 @@ package storage import ( - io "io" + "io" "time" + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/validation" @@ -25,10 +28,12 @@ func (f fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient, return nil, nil, nil, chunk.SchemaConfig{}, nil, err } indexClient, chunkClient, tableClient, schemaConfig, closer, err := f.fixture.Clients() + reg := prometheus.NewRegistry() + logger := log.NewNopLogger() indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{ MaxSizeItems: 500, Validity: 5 * time.Minute, - }), 5*time.Minute, limits) + }, reg, logger), 5*time.Minute, limits, logger) return indexClient, chunkClient, tableClient, schemaConfig, closer, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index cf9e37f1ca8d0..6408a5c5eab08 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" @@ -14,7 +15,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/cache" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -46,18 +46,20 @@ type cachingIndexClient struct { cache cache.Cache validity time.Duration limits StoreLimits + logger log.Logger } -func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits StoreLimits) chunk.IndexClient { +func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits StoreLimits, logger log.Logger) chunk.IndexClient { if c == nil || cache.IsEmptyTieredCache(c) { return client } return &cachingIndexClient{ IndexClient: client, - cache: cache.NewSnappy(c), + cache: cache.NewSnappy(c, logger), validity: validity, limits: limits, + logger: logger, } } @@ -226,7 +228,7 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc hashed = append(hashed, cache.HashKey(keys[i])) out, err := proto.Marshal(&batches[i]) if err != nil { - level.Warn(util.Logger).Log("msg", "error marshalling ReadBatch", "err", err) + level.Warn(s.logger).Log("msg", "error marshalling ReadBatch", "err", err) cacheEncodeErrs.Inc() return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 1629ead9247fc..d63b1d958b1f8 100644 --- 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -28,7 +29,7 @@ import ( // Supported storage engines const ( StorageEngineChunks = "chunks" - StorageEngineTSDB = "tsdb" + StorageEngineBlocks = "blocks" ) type indexStoreFactories struct { @@ -91,19 +92,22 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.Swift.RegisterFlags(f) cfg.GrpcConfig.RegisterFlags(f) - f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or tsdb. Be aware tsdb is experimental and shouldn't be used in production.") + f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or blocks. Be aware that blocks storage is experimental and shouldn't be used in production.") cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f) f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.") } // Validate config and returns error on failure func (cfg *Config) Validate() error { - if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineTSDB { + if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineBlocks { return errors.New("unsupported storage engine") } if err := cfg.CassandraStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Cassandra Storage config") } + if err := cfg.GCPStorageConfig.Validate(util.Logger); err != nil { + return errors.Wrap(err, "invalid GCP Storage Storage config") + } if err := cfg.Swift.Validate(); err != nil { return errors.Wrap(err, "invalid Swift Storage config") } @@ -114,22 +118,30 @@ func (cfg *Config) Validate() error { } // NewStore makes the storage clients based on the configuration. 
-func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConfig, limits StoreLimits, reg prometheus.Registerer, cacheGenNumLoader chunk.CacheGenNumLoader) (chunk.Store, error) { +func NewStore( + cfg Config, + storeCfg chunk.StoreConfig, + schemaCfg chunk.SchemaConfig, + limits StoreLimits, + reg prometheus.Registerer, + cacheGenNumLoader chunk.CacheGenNumLoader, + logger log.Logger, +) (chunk.Store, error) { chunkMetrics := newChunkClientMetrics(reg) - indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig) + indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig, reg, logger) if err != nil { return nil, err } - writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig) + writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger) if err != nil { return nil, err } chunkCacheCfg := storeCfg.ChunkCacheConfig chunkCacheCfg.Prefix = "chunks" - chunksCache, err := cache.New(chunkCacheCfg) + chunksCache, err := cache.New(chunkCacheCfg, reg, logger) if err != nil { return nil, err } @@ -160,7 +172,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf if err != nil { return nil, errors.Wrap(err, "error creating index client") } - index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits) + index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits, logger) objectStoreType := s.ObjectType if objectStoreType == "" { diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go index c4426b1cef850..8d5ec0bbf50e5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go @@ -78,7 +78,7 @@ type Compactor struct { services.Service compactorCfg Config - storageCfg cortex_tsdb.Config + storageCfg cortex_tsdb.BlocksStorageConfig logger log.Logger parentLogger log.Logger registerer prometheus.Registerer @@ -118,7 +118,7 @@ type Compactor struct { } // NewCompactor makes a new Compactor. 
-func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { +func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) { bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg, "compactor", logger, registerer) if err != nil { @@ -139,7 +139,7 @@ func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log func newCompactor( compactorCfg Config, - storageCfg cortex_tsdb.Config, + storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer, createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error), diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index 5b06530065b62..b7403bce7759c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -94,7 +94,7 @@ type Config struct { QueryRange queryrange.Config `yaml:"query_range"` TableManager chunk.TableManagerConfig `yaml:"table_manager"` Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. - TSDB tsdb.Config `yaml:"tsdb"` + BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` Compactor compactor.Config `yaml:"compactor"` StoreGateway storegateway.Config `yaml:"store_gateway"` PurgerConfig purger.Config `yaml:"purger"` @@ -133,7 +133,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.QueryRange.RegisterFlags(f) c.TableManager.RegisterFlags(f) c.Encoding.RegisterFlags(f) - c.TSDB.RegisterFlags(f) + c.BlocksStorage.RegisterFlags(f) c.Compactor.RegisterFlags(f) c.StoreGateway.RegisterFlags(f) c.PurgerConfig.RegisterFlags(f) @@ -166,7 +166,7 @@ func (c *Config) Validate(log log.Logger) error { if err := c.Ruler.Validate(); err != nil { return errors.Wrap(err, "invalid ruler config") } - if err := c.TSDB.Validate(); err != nil { + if err := c.BlocksStorage.Validate(); err != nil { return errors.Wrap(err, "invalid TSDB config") } if err := c.LimitsConfig.Validate(c.Distributor.ShardByAllLabels); err != nil { @@ -178,11 +178,17 @@ func (c *Config) Validate(log log.Logger) error { if err := c.Querier.Validate(); err != nil { return errors.Wrap(err, "invalid querier config") } + if err := c.IngesterClient.Validate(log); err != nil { + return errors.Wrap(err, "invalid ingester_client config") + } + if err := c.Worker.Validate(log); err != nil { + return errors.Wrap(err, "invalid frontend_worker config") + } if err := c.QueryRange.Validate(log); err != nil { - return errors.Wrap(err, "invalid queryrange config") + return errors.Wrap(err, "invalid query_range config") } if err := c.TableManager.Validate(); err != nil { - return errors.Wrap(err, "invalid tablemanager config") + return errors.Wrap(err, "invalid table_manager config") } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index 25f03ae5e5181..a10486102e480 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -255,14 +255,14 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, l 
} return querier.NewChunkStoreQueryable(cfg.Querier, chunkStore), nil - case storage.StorageEngineTSDB: + case storage.StorageEngineBlocks: // When running in single binary, if the blocks sharding is disabled and no custom // store-gateway address has been configured, we can set it to the running process. if cfg.Target == All && !cfg.StoreGateway.ShardingEnabled && cfg.Querier.StoreGatewayAddresses == "" { cfg.Querier.StoreGatewayAddresses = fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort) } - return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.TSDB, limits, util.Logger, reg) + return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.BlocksStorage, limits, util.Logger, reg) default: return nil, fmt.Errorf("unknown storage engine '%s'", engine) @@ -270,8 +270,8 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, l } func (t *Cortex) tsdbIngesterConfig() { - t.Cfg.Ingester.TSDBEnabled = t.Cfg.Storage.Engine == storage.StorageEngineTSDB - t.Cfg.Ingester.TSDBConfig = t.Cfg.TSDB + t.Cfg.Ingester.BlocksStorageEnabled = t.Cfg.Storage.Engine == storage.StorageEngineBlocks + t.Cfg.Ingester.BlocksStorageConfig = t.Cfg.BlocksStorage } func (t *Cortex) initIngester() (serv services.Service, err error) { @@ -316,7 +316,7 @@ func (t *Cortex) initChunkStore() (serv services.Service, err error) { return } - t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader) + t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader, util.Logger) if err != nil { return } @@ -407,7 +407,7 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { } func (t *Cortex) initTableManager() (services.Service, error) { - if t.Cfg.Storage.Engine == storage.StorageEngineTSDB { + if t.Cfg.Storage.Engine == storage.StorageEngineBlocks { return nil, nil // table manager isn't used in v2 } @@ -487,7 +487,18 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer) queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer) - t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger, t.RulerStorage) + t.Ruler, err = ruler.NewRuler( + t.Cfg.Ruler, + ruler.DefaultTenantManagerFactory( + t.Cfg.Ruler, + t.Distributor, + queryable, + engine, + ), + prometheus.DefaultRegisterer, + util.Logger, + t.RulerStorage, + ) if err != nil { return } @@ -526,7 +537,7 @@ func (t *Cortex) initCompactor() (serv services.Service, err error) { t.Cfg.Compactor.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.TSDB, util.Logger, prometheus.DefaultRegisterer) + t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.BlocksStorage, util.Logger, prometheus.DefaultRegisterer) if err != nil { return } @@ -537,14 +548,14 @@ func (t *Cortex) initCompactor() (serv services.Service, err error) { } func (t *Cortex) initStoreGateway() (serv services.Service, err error) { - if t.Cfg.Storage.Engine != storage.StorageEngineTSDB { + if t.Cfg.Storage.Engine != 
storage.StorageEngineBlocks { return nil, nil } t.Cfg.StoreGateway.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.StoreGateway.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV - t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.TSDB, t.Cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer) + t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -630,7 +641,7 @@ func (t *Cortex) setupModuleManager() error { Configs: {API}, AlertManager: {API}, Compactor: {API}, - StoreGateway: {API}, + StoreGateway: {API, Overrides}, Purger: {Store, DeleteRequestsStore, API}, All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go index fed48e51af9b9..a7813c6453fcd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go @@ -7,7 +7,6 @@ import ( "github.com/opentracing/opentracing-go" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/user" @@ -25,12 +24,12 @@ func (d *Distributor) Query(ctx context.Context, from, to model.Time, matchers . err := instrument.CollectedRequest(ctx, "Distributor.Query", queryDuration, instrument.ErrorCode, func(ctx context.Context) error { replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...) if err != nil { - return promql.ErrStorage{Err: err} + return err } matrix, err = d.queryIngesters(ctx, replicationSet, req) if err != nil { - return promql.ErrStorage{Err: err} + return err } if s := opentracing.SpanFromContext(ctx); s != nil { @@ -47,12 +46,12 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matc err := instrument.CollectedRequest(ctx, "Distributor.QueryStream", queryDuration, instrument.ErrorCode, func(ctx context.Context) error { replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...) 
if err != nil { - return promql.ErrStorage{Err: err} + return err } result, err = d.queryIngesterStream(ctx, replicationSet, req) if err != nil { - return promql.ErrStorage{Err: err} + return err } if s := opentracing.SpanFromContext(ctx); s != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 8c5d3b54d3088..7ae169785fc8f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -3,10 +3,10 @@ package client import ( "flag" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "google.golang.org/grpc" - _ "google.golang.org/grpc/encoding/gzip" // get gzip compressor registered "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/util/grpcclient" @@ -62,3 +62,7 @@ type Config struct { func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.GRPCClientConfig.RegisterFlagsWithPrefix("ingester.client", f) } + +func (cfg *Config) Validate(log log.Logger) error { + return cfg.GRPCClientConfig.Validate(log) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go index 34e3a11e25ed5..9c619f7877826 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go @@ -24,7 +24,7 @@ const ( // Flush triggers a flush of all the chunks and closes the flush queues. // Called from the Lifecycler as part of the ingester shutdown. func (i *Ingester) Flush() { - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { i.v2LifecyclerFlush() return } @@ -45,7 +45,7 @@ func (i *Ingester) Flush() { // FlushHandler triggers a flush of all in memory chunks. Mainly used for // local testing. func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) { - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { i.v2FlushHandler(w, r) return } @@ -106,6 +106,12 @@ const ( reasonIdle reasonStale reasonSpreadFlush + // Following are flush outcomes + noUser + noSeries + noChunks + flushError + maxFlushReason // Used for testing String() method. Should be last. 
) func (f flushReason) String() string { @@ -124,6 +130,14 @@ func (f flushReason) String() string { return "Stale" case reasonSpreadFlush: return "Spread" + case noUser: + return "NoUser" + case noSeries: + return "NoSeries" + case noChunks: + return "NoChunksToFlush" + case flushError: + return "FlushError" default: panic("unrecognised flushReason") } @@ -146,6 +160,7 @@ func (i *Ingester) sweepSeries(userID string, fp model.Fingerprint, series *memo flushQueueIndex := int(uint64(fp) % uint64(i.cfg.ConcurrentFlushes)) if i.flushQueues[flushQueueIndex].Enqueue(&flushOp{firstTime, userID, fp, immediate}) { + i.metrics.seriesEnqueuedForFlush.WithLabelValues(flush.String()).Inc() util.Event().Log("msg", "add to flush queue", "userID", userID, "reason", flush, "firstTime", firstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex) } } @@ -217,7 +232,8 @@ func (i *Ingester) flushLoop(j int) { } op := o.(*flushOp) - err := i.flushUserSeries(j, op.userID, op.fp, op.immediate) + outcome, err := i.flushUserSeries(j, op.userID, op.fp, op.immediate) + i.metrics.seriesDequeuedOutcome.WithLabelValues(outcome.String()).Inc() if err != nil { level.Error(util.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err) } @@ -231,7 +247,8 @@ func (i *Ingester) flushLoop(j int) { } } -func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) error { +// Returns flush outcome (either original reason, if series was flushed, noFlush if it doesn't need flushing anymore, or one of the errors) +func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) (flushReason, error) { i.metrics.flushSeriesInProgress.Inc() defer i.metrics.flushSeriesInProgress.Dec() @@ -241,19 +258,19 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model. userState, ok := i.userStates.get(userID) if !ok { - return nil + return noUser, nil } series, ok := userState.fpToSeries.get(fp) if !ok { - return nil + return noSeries, nil } userState.fpLocker.Lock(fp) reason := i.shouldFlushSeries(series, fp, immediate) if reason == noFlush { userState.fpLocker.Unlock(fp) - return nil + return noFlush, nil } // shouldFlushSeries() has told us we have at least one chunk. @@ -302,11 +319,9 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model. } if len(chunks) == 0 { - return nil + return noChunks, nil } - i.metrics.flushedSeries.WithLabelValues(reason.String()).Inc() - // flush the chunks without locking the series, as we don't want to hold the series lock for the duration of the dynamo/s3 rpcs. ctx, cancel := context.WithTimeout(context.Background(), i.cfg.FlushOpTimeout) defer cancel() // releases resources if slowOperation completes before timeout elapses @@ -318,7 +333,7 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model. util.Event().Log("msg", "flush chunks", "userID", userID, "reason", reason, "numChunks", len(chunks), "firstTime", chunks[0].FirstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex) err := i.flushChunks(ctx, userID, fp, series.metric, chunks) if err != nil { - return err + return flushError, err } userState.fpLocker.Lock(fp) @@ -329,7 +344,7 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model. 
chunks[i].LastUpdate = model.Now() } userState.fpLocker.Unlock(fp) - return nil + return reason, err } // must be called under fpLocker lock diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 79634f77b16ba..2b6cafaa78ebb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -71,9 +71,9 @@ type Config struct { RateUpdatePeriod time.Duration `yaml:"rate_update_period"` - // Use tsdb block storage - TSDBEnabled bool `yaml:"-"` - TSDBConfig tsdb.Config `yaml:"-"` + // Use blocks storage. + BlocksStorageEnabled bool `yaml:"-"` + BlocksStorageConfig tsdb.BlocksStorageConfig `yaml:"-"` // Injected at runtime and read from the distributor config, required // to accurately apply global limits. @@ -158,7 +158,7 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c cfg.ingesterClientFactory = client.MakeIngesterClient } - if cfg.TSDBEnabled { + if cfg.BlocksStorageEnabled { return NewV2(cfg, clientConfig, limits, registerer) } @@ -263,7 +263,7 @@ func (i *Ingester) startFlushLoops() { // * Always replays the WAL. // * Does not start the lifecycler. func NewForFlusher(cfg Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { - if cfg.TSDBEnabled { + if cfg.BlocksStorageEnabled { return NewV2ForFlusher(cfg, registerer) } @@ -379,7 +379,7 @@ func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client. return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2Push(ctx, req) } @@ -619,7 +619,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2Query(ctx, req) } @@ -686,7 +686,7 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ return err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2QueryStream(req, stream) } @@ -767,7 +767,7 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2LabelValues(ctx, req) } @@ -792,7 +792,7 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2LabelNames(ctx, req) } @@ -817,7 +817,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2MetricsForLabelMatchers(ctx, req) } @@ -887,7 +887,7 @@ func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest) return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2UserStats(ctx, req) } @@ -916,7 +916,7 @@ func (i *Ingester) AllUserStats(ctx context.Context, req *client.UserStatsReques return nil, err } - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2AllUserStats(ctx, req) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index da21f5da928e3..78d9a264e2e54 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -193,7 +193,7 @@ func 
newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer // NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage. func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { util.WarnExperimentalUse("Blocks storage engine") - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer) + bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -219,7 +219,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, }, i.numSeriesInTSDB) } - i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, cfg.TSDBConfig.FlushBlocksOnShutdown, registerer) + i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown, registerer) if err != nil { return nil, err } @@ -240,7 +240,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, // on Flush method and flush all openened TSDBs when called. func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) { util.WarnExperimentalUse("Blocks storage engine") - bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer) + bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig, "ingester", util.Logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -288,7 +288,7 @@ func (i *Ingester) startingV2(ctx context.Context) error { compactionService := services.NewBasicService(nil, i.compactionLoop, nil) servs = append(servs, compactionService) - if i.cfg.TSDBConfig.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil) servs = append(servs, shippingService) } @@ -302,7 +302,7 @@ func (i *Ingester) startingV2(ctx context.Context) error { } func (i *Ingester) stoppingV2ForFlusher(_ error) error { - if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown { + if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown { i.closeAllTSDB() } return nil @@ -323,7 +323,7 @@ func (i *Ingester) stoppingV2(_ error) error { level.Warn(util.Logger).Log("msg", "failed to stop ingester lifecycler", "err", err) } - if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown { + if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown { i.closeAllTSDB() } return nil @@ -921,10 +921,10 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error) // createTSDB creates a TSDB for a given userID, and returns the created db. 
func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { tsdbPromReg := prometheus.NewRegistry() - udir := i.cfg.TSDBConfig.BlocksDir(userID) + udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID) userLogger := util.WithUserID(userID, util.Logger) - blockRanges := i.cfg.TSDBConfig.BlockRanges.ToMilliseconds() + blockRanges := i.cfg.BlocksStorageConfig.TSDB.BlockRanges.ToMilliseconds() userDB := &userTSDB{ userID: userID, @@ -937,12 +937,12 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { // Create a new user database db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ - RetentionDuration: i.cfg.TSDBConfig.Retention.Milliseconds(), + RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), MinBlockDuration: blockRanges[0], MaxBlockDuration: blockRanges[len(blockRanges)-1], NoLockfile: true, - StripeSize: i.cfg.TSDBConfig.StripeSize, - WALCompression: i.cfg.TSDBConfig.WALCompressionEnabled, + StripeSize: i.cfg.BlocksStorageConfig.TSDB.StripeSize, + WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, SeriesLifecycleCallback: userDB, }) if err != nil { @@ -979,7 +979,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } // Create a new shipper for this database - if i.cfg.TSDBConfig.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { userDB.shipper = shipper.New( userLogger, tsdbPromReg, @@ -987,7 +987,8 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { cortex_tsdb.NewUserBucketClient(userID, i.TSDBState.bucket), func() labels.Labels { return l }, metadata.ReceiveSource, - true, // Allow out of order uploads. It's fine in Cortex's context. + false, // No need to upload compacted blocks. Cortex compactor takes care of that. + true, // Allow out of order uploads. It's fine in Cortex's context. ) } @@ -1033,15 +1034,15 @@ func (i *Ingester) closeAllTSDB() { func (i *Ingester) openExistingTSDB(ctx context.Context) error { level.Info(util.Logger).Log("msg", "opening existing TSDBs") wg := &sync.WaitGroup{} - openGate := gate.New(i.cfg.TSDBConfig.MaxTSDBOpeningConcurrencyOnStartup) + openGate := gate.New(i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup) - err := filepath.Walk(i.cfg.TSDBConfig.Dir, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error { if err != nil { return filepath.SkipDir } // Skip root dir and all other files - if path == i.cfg.TSDBConfig.Dir || !info.IsDir() { + if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() { return nil } @@ -1116,7 +1117,7 @@ func (i *Ingester) numSeriesInTSDB() float64 { } func (i *Ingester) shipBlocksLoop(ctx context.Context) error { - shipTicker := time.NewTicker(i.cfg.TSDBConfig.ShipInterval) + shipTicker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.ShipInterval) defer shipTicker.Stop() for { @@ -1153,7 +1154,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) { // Number of concurrent workers is limited in order to avoid to concurrently sync a lot // of tenants in a large cluster. - i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.ShipConcurrency, func(userID string) { + i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.ShipConcurrency, func(userID string) { // Get the user's DB. If the user doesn't exist, we skip it. 
userDB := i.getTSDB(userID) if userDB == nil || userDB.shipper == nil { @@ -1170,7 +1171,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) { } func (i *Ingester) compactionLoop(ctx context.Context) error { - ticker := time.NewTicker(i.cfg.TSDBConfig.HeadCompactionInterval) + ticker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval) defer ticker.Stop() for { @@ -1204,7 +1205,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { } } - i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.HeadCompactionConcurrency, func(userID string) { + i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(userID string) { userDB := i.getTSDB(userID) if userDB == nil { return @@ -1226,7 +1227,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { reason = "forced" err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime())) - case i.cfg.TSDBConfig.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.TSDBConfig.HeadCompactionIdleTimeout): + case i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout): reason = "idle" level.Info(util.Logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime())) @@ -1289,7 +1290,7 @@ func (i *Ingester) v2LifecyclerFlush() { ctx := context.Background() i.compactBlocks(ctx, true) - if i.cfg.TSDBConfig.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { i.shipBlocks(ctx) } @@ -1325,7 +1326,7 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) { return } - if i.cfg.TSDBConfig.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering shipping") select { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go index 87c1f622b7d49..18977e7176e7a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go @@ -5,10 +5,10 @@ import ( "sort" "strings" "sync" - "sync/atomic" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" + "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/util" ) @@ -24,8 +24,7 @@ type fpMappings map[model.Fingerprint]map[string]model.Fingerprint // fpMapper is used to map fingerprints in order to work around fingerprint // collisions. type fpMapper struct { - // highestMappedFP has to be aligned for atomic operations. - highestMappedFP model.Fingerprint + highestMappedFP atomic.Uint64 mtx sync.RWMutex // Protects mappings. 
mappings fpMappings @@ -130,7 +129,7 @@ func (m *fpMapper) maybeAddMapping( } func (m *fpMapper) nextMappedFP() model.Fingerprint { - mappedFP := model.Fingerprint(atomic.AddUint64((*uint64)(&m.highestMappedFP), 1)) + mappedFP := model.Fingerprint(m.highestMappedFP.Inc()) if mappedFP > maxMappedFP { panic(fmt.Errorf("more than %v fingerprints mapped in collision detection", maxMappedFP)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go index 22e5ebc565426..b052c3b996c4d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go @@ -53,7 +53,8 @@ type ingesterMetrics struct { chunkSize prometheus.Histogram chunkAge prometheus.Histogram memoryChunks prometheus.Gauge - flushedSeries *prometheus.CounterVec + seriesEnqueuedForFlush *prometheus.CounterVec + seriesDequeuedOutcome *prometheus.CounterVec droppedChunks prometheus.Counter oldestUnflushedChunkTimestamp prometheus.Gauge } @@ -192,10 +193,14 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD Name: "cortex_ingester_memory_chunks", Help: "The total number of chunks in memory.", }), - flushedSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_series_flushed_total", - Help: "Total number of flushed series, with reasons.", + seriesEnqueuedForFlush: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_flushing_enqueued_series_total", + Help: "Total number of series enqueued for flushing, with reasons.", }, []string{"reason"}), + seriesDequeuedOutcome: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Name: "cortex_ingester_flushing_dequeued_series_total", + Help: "Total number of series dequeued for flushing, with outcome (superset of enqueue reasons)", + }, []string{"outcome"}), droppedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_dropped_chunks_total", Help: "Total number of chunks dropped from flushing because they have too few samples.", diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go index d0c348677b4ef..ecabd8b783a0b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go @@ -2,13 +2,14 @@ package ingester import ( "sync" - "sync/atomic" "time" + + "go.uber.org/atomic" ) // ewmaRate tracks an exponentially weighted moving average of a per-second rate. type ewmaRate struct { - newEvents int64 + newEvents atomic.Int64 alpha float64 interval time.Duration lastRate float64 @@ -32,8 +33,8 @@ func (r *ewmaRate) rate() float64 { // tick assumes to be called every r.interval. func (r *ewmaRate) tick() { - newEvents := atomic.LoadInt64(&r.newEvents) - atomic.AddInt64(&r.newEvents, -newEvents) + newEvents := r.newEvents.Load() + r.newEvents.Sub(newEvents) instantRate := float64(newEvents) / r.interval.Seconds() r.mutex.Lock() @@ -49,9 +50,9 @@ func (r *ewmaRate) tick() { // inc counts one event. 
func (r *ewmaRate) inc() { - atomic.AddInt64(&r.newEvents, 1) + r.newEvents.Inc() } func (r *ewmaRate) add(delta int64) { - atomic.AddInt64(&r.newEvents, delta) + r.newEvents.Add(delta) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go index a8e4ba70613b1..4d4a9a5b6694a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go @@ -2,10 +2,10 @@ package ingester import ( "sync" - "sync/atomic" "unsafe" "github.com/prometheus/common/model" + "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/util" ) @@ -16,7 +16,7 @@ const seriesMapShards = 128 // goroutine-safe. A seriesMap is effectively a goroutine-safe version of // map[model.Fingerprint]*memorySeries. type seriesMap struct { - size int32 + size atomic.Int32 shards []shard } @@ -65,7 +65,7 @@ func (sm *seriesMap) put(fp model.Fingerprint, s *memorySeries) { shard.mtx.Unlock() if !ok { - atomic.AddInt32(&sm.size, 1) + sm.size.Inc() } } @@ -77,7 +77,7 @@ func (sm *seriesMap) del(fp model.Fingerprint) { delete(shard.m, fp) shard.mtx.Unlock() if ok { - atomic.AddInt32(&sm.size, -1) + sm.size.Dec() } } @@ -106,5 +106,5 @@ func (sm *seriesMap) iter() <-chan fingerprintSeriesPair { } func (sm *seriesMap) length() int { - return int(atomic.LoadInt32(&sm.size)) + return int(sm.size.Load()) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go index 5310353914c6b..383f00d78f6a0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go @@ -221,7 +221,7 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error xfer := func() error { // Validate the final directory is empty, if it exists and is empty delete it so a move can succeed - err := removeEmptyDir(i.cfg.TSDBConfig.Dir) + err := removeEmptyDir(i.cfg.BlocksStorageConfig.TSDB.Dir) if err != nil { return errors.Wrap(err, "remove existing TSDB directory") } @@ -304,9 +304,9 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error level.Info(util.Logger).Log("msg", "Total xfer", "from_ingester", fromIngesterID, "files", filesXfer, "bytes", bytesXfer) // Move the tmpdir to the final location - err = os.Rename(tmpDir, i.cfg.TSDBConfig.Dir) + err = os.Rename(tmpDir, i.cfg.BlocksStorageConfig.TSDB.Dir) if err != nil { - return errors.Wrap(err, fmt.Sprintf("unable to move received TSDB blocks from %s to %s", tmpDir, i.cfg.TSDBConfig.Dir)) + return errors.Wrap(err, fmt.Sprintf("unable to move received TSDB blocks from %s to %s", tmpDir, i.cfg.BlocksStorageConfig.TSDB.Dir)) } // At this point all TSDBs have been received, so we can proceed loading TSDBs in memory. @@ -315,9 +315,9 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error // 2. 
If a query is received on user X, for which the TSDB has been transferred, before // the first series is ingested, if we don't open the TSDB the query will return an // empty result (because the TSDB is opened only on first push or transfer) - userIDs, err := ioutil.ReadDir(i.cfg.TSDBConfig.Dir) + userIDs, err := ioutil.ReadDir(i.cfg.BlocksStorageConfig.TSDB.Dir) if err != nil { - return errors.Wrap(err, fmt.Sprintf("unable to list TSDB users in %s", i.cfg.TSDBConfig.Dir)) + return errors.Wrap(err, fmt.Sprintf("unable to list TSDB users in %s", i.cfg.BlocksStorageConfig.TSDB.Dir)) } for _, user := range userIDs { @@ -438,7 +438,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error { } func (i *Ingester) transferOut(ctx context.Context) error { - if i.cfg.TSDBEnabled { + if i.cfg.BlocksStorageEnabled { return i.v2TransferOut(ctx) } @@ -584,7 +584,7 @@ func (i *Ingester) v2TransferOut(ctx context.Context) error { } // Grab a list of all blocks that need to be shipped - blocks, err := unshippedBlocks(i.cfg.TSDBConfig.Dir) + blocks, err := unshippedBlocks(i.cfg.BlocksStorageConfig.TSDB.Dir) if err != nil { return err } @@ -592,7 +592,7 @@ func (i *Ingester) v2TransferOut(ctx context.Context) error { for user, blockIDs := range blocks { // Transfer the users TSDB // TODO(thor) transferring users can be done concurrently - i.transferUser(ctx, stream, i.cfg.TSDBConfig.Dir, i.lifecycler.ID, user, blockIDs) + i.transferUser(ctx, stream, i.cfg.BlocksStorageConfig.TSDB.Dir, i.lifecycler.ID, user, blockIDs) } _, err = stream.CloseAndRecv() diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go index 5cb7648b030dd..cfc90fcdb838a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go @@ -6,7 +6,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/thanos-io/thanos/pkg/store/storepb" @@ -207,7 +206,7 @@ func (it *blockQuerierSeriesIterator) Err() error { err := it.iterators[it.i].Err() if err != nil { - return promql.ErrStorage{Err: errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels)} + return errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go index 6c8e392cacbeb..e0c366d7e65c3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -150,7 +149,7 @@ func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consist return q, nil } -func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.Config, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { +func 
NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) { var stores BlocksStoreSet bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), storageCfg, "querier", logger, reg) @@ -249,12 +248,12 @@ func (q *BlocksStoreQueryable) stopping(_ error) error { // Querier returns a new Querier on the storage. func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { if s := q.State(); s != services.Running { - return nil, promql.ErrStorage{Err: errors.Errorf("BlocksStoreQueryable is not running: %v", s)} + return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s) } userID, err := user.ExtractOrgID(ctx) if err != nil { - return nil, promql.ErrStorage{Err: err} + return nil, err } return &blocksStoreQuerier{ @@ -291,14 +290,7 @@ type blocksStoreQuerier struct { // Select implements storage.Querier interface. // The bool passed is ignored because the series is always sorted. func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - set := q.selectSorted(sp, matchers...) - - // We need to wrap the error in order to have Prometheus returning a 5xx error. - if err := set.Err(); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { - set = storage.ErrSeriesSet(promql.ErrStorage{Err: err}) - } - - return set + return q.selectSorted(sp, matchers...) } func (q *blocksStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { @@ -334,7 +326,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* maxT = util.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter))) if origMaxT != maxT { - level.Debug(spanLog).Log("msg", "query max time has been manipulated", "original", origMaxT, "updated", maxT) + level.Debug(spanLog).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT) } if maxT < minT { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go index 50fa8573fb642..b36db0a2429df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go @@ -5,7 +5,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/weaveworks/common/user" @@ -46,19 +45,7 @@ func (q *chunkStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ... } chunks, err := q.store.Get(q.ctx, userID, model.Time(sp.Start), model.Time(sp.End), matchers...) if err != nil { - switch err.(type) { - case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout: - // Recognized by Prometheus API, vendor/github.com/prometheus/prometheus/promql/engine.go:91. - // Don't translate those, just in case we use them internally. - return storage.ErrSeriesSet(err) - case chunk.QueryError: - // This will be returned with status code 422 by Prometheus API. 
- // vendor/github.com/prometheus/prometheus/web/api/v1/api.go:1393 - return storage.ErrSeriesSet(err) - default: - // All other errors will be returned as 500. - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) - } + return storage.ErrSeriesSet(err) } return partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go index 675946f0e4ccf..4912832a2c680 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go @@ -5,9 +5,9 @@ import ( "sort" "time" + "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/weaveworks/common/user" @@ -31,44 +31,46 @@ type Distributor interface { MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error) } -func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc, queryIngesterWithin time.Duration) QueryableWithFilter { +func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration) QueryableWithFilter { return distributorQueryable{ - distributor: distributor, - streaming: streaming, - iteratorFn: iteratorFn, - queryIngesterWithin: queryIngesterWithin, + distributor: distributor, + streaming: streaming, + iteratorFn: iteratorFn, + queryIngestersWithin: queryIngestersWithin, } } type distributorQueryable struct { - distributor Distributor - streaming bool - iteratorFn chunkIteratorFunc - queryIngesterWithin time.Duration + distributor Distributor + streaming bool + iteratorFn chunkIteratorFunc + queryIngestersWithin time.Duration } func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { return &distributorQuerier{ - distributor: d.distributor, - ctx: ctx, - mint: mint, - maxt: maxt, - streaming: d.streaming, - chunkIterFn: d.iteratorFn, + distributor: d.distributor, + ctx: ctx, + mint: mint, + maxt: maxt, + streaming: d.streaming, + chunkIterFn: d.iteratorFn, + queryIngestersWithin: d.queryIngestersWithin, }, nil } func (d distributorQueryable) UseQueryable(now time.Time, _, queryMaxT int64) bool { // Include ingester only if maxt is within QueryIngestersWithin w.r.t. current time. - return d.queryIngesterWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngesterWithin)) + return d.queryIngestersWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngestersWithin)) } type distributorQuerier struct { - distributor Distributor - ctx context.Context - mint, maxt int64 - streaming bool - chunkIterFn chunkIteratorFunc + distributor Distributor + ctx context.Context + mint, maxt int64 + streaming bool + chunkIterFn chunkIteratorFunc + queryIngestersWithin time.Duration } // Select implements storage.Querier interface. @@ -77,42 +79,62 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. 
log, ctx := spanlogger.New(q.ctx, "distributorQuerier.Select") defer log.Span.Finish() + minT, maxT := q.mint, q.maxt + if sp != nil { + minT, maxT = sp.Start, sp.End + } + + // If queryIngestersWithin is enabled, we do manipulate the query mint to query samples up until + // now - queryIngestersWithin, because older time ranges are covered by the storage. This + // optimization is particularly important for the blocks storage where the blocks retention in the + // ingesters could be way higher than queryIngestersWithin. + if q.queryIngestersWithin > 0 { + now := time.Now() + origMinT := minT + minT = util.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) + + if origMinT != minT { + level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT) + } + + if minT > maxT { + level.Debug(log).Log("msg", "empty query time range after min time manipulation") + return storage.EmptySeriesSet() + } + } + // Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation, // which needs only metadata. if sp == nil { - ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...) + ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...) if err != nil { return storage.ErrSeriesSet(err) } return series.MetricsToSeriesSet(ms) } - mint, maxt := sp.Start, sp.End - if q.streaming { - return q.streamingSelect(*sp, matchers) + return q.streamingSelect(minT, maxT, matchers) } - matrix, err := q.distributor.Query(ctx, model.Time(mint), model.Time(maxt), matchers...) + matrix, err := q.distributor.Query(ctx, model.Time(minT), model.Time(maxT), matchers...) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } // Using MatrixToSeriesSet (and in turn NewConcreteSeriesSet), sorts the series. return series.MatrixToSeriesSet(matrix) } -func (q *distributorQuerier) streamingSelect(sp storage.SelectHints, matchers []*labels.Matcher) storage.SeriesSet { +func (q *distributorQuerier) streamingSelect(minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet { userID, err := user.ExtractOrgID(q.ctx) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } - mint, maxt := sp.Start, sp.End - - results, err := q.distributor.QueryStream(q.ctx, model.Time(mint), model.Time(maxt), matchers...) + results, err := q.distributor.QueryStream(q.ctx, model.Time(minT), model.Time(maxT), matchers...) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } if len(results.Timeseries) != 0 { @@ -131,7 +153,7 @@ func (q *distributorQuerier) streamingSelect(sp storage.SelectHints, matchers [] chunks, err := chunkcompat.FromChunks(userID, ls, result.Chunks) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } series := &chunkSeries{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go index f629a0aafe5df..d6beed38322ec 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go @@ -71,7 +71,7 @@ type Frontend struct { // Metrics. 
queueDuration prometheus.Histogram - queueLength prometheus.Gauge + queueLength *prometheus.GaugeVec } type request struct { @@ -96,11 +96,11 @@ func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (*Fronten Help: "Time spend by requests queued.", Buckets: prometheus.DefBuckets, }), - queueLength: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{ + queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cortex", Name: "query_frontend_queue_length", Help: "Number of queries in the queue.", - }), + }, []string{"user"}), connectedClients: atomic.NewInt32(0), } f.cond = sync.NewCond(&f.mtx) @@ -363,7 +363,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { select { case queue <- req: - f.queueLength.Add(1) + f.queueLength.WithLabelValues(userID).Inc() f.cond.Broadcast() return nil default: @@ -416,7 +416,7 @@ FindQueue: f.cond.Broadcast() f.queueDuration.Observe(time.Since(request.enqueueTime).Seconds()) - f.queueLength.Add(-1) + f.queueLength.WithLabelValues(userID).Dec() request.queueSpan.Finish() // Ensure the request has not already expired. diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go index 7721b0eb054a2..c7d897916c244 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go @@ -40,6 +40,10 @@ func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) } +func (cfg *WorkerConfig) Validate(log log.Logger) error { + return cfg.GRPCClientConfig.Validate(log) +} + // Worker is the counter-part to the frontend, actually processing requests. type worker struct { cfg WorkerConfig diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index 46e5b1e8dc18f..1fb6915bbb21d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -296,7 +296,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat userID, err := user.ExtractOrgID(ctx) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } // Validate query time range. 
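For readers skimming the query-frontend change above (frontend.go now tracks queue length per tenant via a GaugeVec instead of a single gauge), here is a minimal standalone sketch of that pattern. It reuses the metric name and label from the patch, but everything else (the registry, the tenant ID) is illustrative only and not part of the vendored code.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Per-tenant queue length gauge, mirroring the new cortex_query_frontend_queue_length GaugeVec.
	queueLength := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "cortex",
		Name:      "query_frontend_queue_length",
		Help:      "Number of queries in the queue.",
	}, []string{"user"})

	userID := "tenant-1" // illustrative tenant ID; the real frontend derives it from the request context

	// Enqueue: increment the child gauge for this tenant.
	queueLength.WithLabelValues(userID).Inc()

	// Dequeue: decrement it again once a querier worker picks the request up.
	queueLength.WithLabelValues(userID).Dec()

	fmt.Println("per-tenant queue length gauge registered and updated")
}

Partitioning the gauge by user makes per-tenant queueing visible, at the cost of one extra time series per active tenant.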
@@ -308,7 +308,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime) if err != nil { - return storage.ErrSeriesSet(promql.ErrStorage{Err: err}) + return storage.ErrSeriesSet(err) } if len(q.queriers) == 1 { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go index 2256be9b1a4e9..54bb46f6d9c74 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go @@ -43,7 +43,7 @@ func (l limits) Do(ctx context.Context, r Request) (Response, error) { maxQueryLen := l.MaxQueryLength(userid) queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart())) - if maxQueryLen != 0 && queryLen > maxQueryLen { + if maxQueryLen > 0 && queryLen > maxQueryLen { return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLen) } return l.next.Do(ctx, r) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go index 04d14498928f0..5feb80137e4ea 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go @@ -133,6 +133,10 @@ func (prometheusCodec) MergeResponse(responses ...Response) (Response, error) { if len(responses) == 0 { return &PrometheusResponse{ Status: StatusSuccess, + Data: PrometheusData{ + ResultType: model.ValMatrix.String(), + Result: []SampleStream{}, + }, }, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index d766c7b5509a6..30440d0b910dd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -14,6 +14,7 @@ import ( "github.com/gogo/protobuf/types" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/uber/jaeger-client-go" "github.com/weaveworks/common/httpgrpc" @@ -128,8 +129,9 @@ func NewResultsCacheMiddleware( merger Merger, extractor Extractor, cacheGenNumberLoader CacheGenNumberLoader, + reg prometheus.Registerer, ) (Middleware, cache.Cache, error) { - c, err := cache.New(cfg.CacheConfig) + c, err := cache.New(cfg.CacheConfig, reg, logger) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go index dac7de72ccc8f..66c06117bcede 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go @@ -159,7 +159,7 @@ func NewTripperware( var c cache.Cache if cfg.CacheResults { - queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor, cacheGenNumberLoader) + queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, 
constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor, cacheGenNumberLoader, registerer) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go index e23d5522bcb38..7334fa9c638eb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go @@ -253,13 +253,11 @@ func (l *BasicLifecycler) registerInstance(ctx context.Context) error { return ringDesc, true, nil } - if instanceDesc.State != state || !tokens.Equals(instanceDesc.Tokens) { - instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state) - return ringDesc, true, nil - } - - // We haven't modified the ring, so don't try to store it. - return nil, true, nil + // Always overwrite the instance in the ring (even if it already exists) because some properties + // may have changed (state, tokens, zone, address) and even if they didn't, the heartbeat at + // least did. + instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state) + return ringDesc, true, nil }) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go index c5e6a00b55939..89a24656aacbd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go @@ -4,12 +4,13 @@ import ( "context" "fmt" "sync" - "sync/atomic" + + "go.uber.org/atomic" ) type batchTracker struct { - rpcsPending int32 - rpcsFailed int32 + rpcsPending atomic.Int32 + rpcsFailed atomic.Int32 done chan struct{} err chan error } @@ -23,8 +24,8 @@ type ingester struct { type itemTracker struct { minSuccess int maxFailures int - succeeded int32 - failed int32 + succeeded atomic.Int32 + failed atomic.Int32 } // DoBatch request against a set of keys in the ring, handling replication and @@ -70,10 +71,10 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges } tracker := batchTracker{ - rpcsPending: int32(len(itemTrackers)), - done: make(chan struct{}, 1), - err: make(chan error, 1), + done: make(chan struct{}, 1), + err: make(chan error, 1), } + tracker.rpcsPending.Store(int32(len(itemTrackers))) var wg sync.WaitGroup @@ -115,17 +116,17 @@ func (b *batchTracker) record(sampleTrackers []*itemTracker, err error) { // goroutine will write to either channel.
for i := range sampleTrackers { if err != nil { - if atomic.AddInt32(&sampleTrackers[i].failed, 1) <= int32(sampleTrackers[i].maxFailures) { + if sampleTrackers[i].failed.Inc() <= int32(sampleTrackers[i].maxFailures) { continue } - if atomic.AddInt32(&b.rpcsFailed, 1) == 1 { + if b.rpcsFailed.Inc() == 1 { b.err <- err } } else { - if atomic.AddInt32(&sampleTrackers[i].succeeded, 1) != int32(sampleTrackers[i].minSuccess) { + if sampleTrackers[i].succeeded.Inc() != int32(sampleTrackers[i].minSuccess) { continue } - if atomic.AddInt32(&b.rpcsPending, -1) == 0 { + if b.rpcsPending.Dec() == 0 { b.done <- struct{}{} } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go index dc1490787983d..d6af8b2ec9905 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go @@ -9,7 +9,6 @@ import ( "io/ioutil" "net" "sync" - "sync/atomic" "time" "github.com/go-kit/kit/log" @@ -17,6 +16,7 @@ import ( "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/memberlist" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -79,7 +79,7 @@ type TCPTransport struct { wg sync.WaitGroup tcpListeners []*net.TCPListener - shutdown int32 + shutdown atomic.Int32 advertiseMu sync.RWMutex advertiseAddr string @@ -172,7 +172,7 @@ func (t *TCPTransport) tcpListen(tcpLn *net.TCPListener) { for { conn, err := tcpLn.AcceptTCP() if err != nil { - if s := atomic.LoadInt32(&t.shutdown); s == 1 { + if s := t.shutdown.Load(); s == 1 { break } @@ -503,7 +503,7 @@ func (t *TCPTransport) StreamCh() <-chan net.Conn { // transport a chance to clean up any listeners. func (t *TCPTransport) Shutdown() error { // This will avoid log spam about errors when we shut down. - atomic.StoreInt32(&t.shutdown, 1) + t.shutdown.Store(1) // Rip through all the connections and shut them down. for _, conn := range t.tcpListeners { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index a026a27064d0a..fb473d7263718 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -227,6 +227,9 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { } if err := ringDesc.Ready(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { + level.Warn(util.Logger).Log("msg", "found an existing ingester(s) with a problem in the ring, "+ + "this ingester cannot complete joining and become ready until this problem is resolved. 
"+ + "The /ring http endpoint on the distributor (or single binary) provides visibility into the ring.", "err", err) return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go index 597ac27618497..50e0247b780db 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go @@ -4,6 +4,9 @@ import ( "context" "time" + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/notifier" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" @@ -17,14 +20,15 @@ import ( type Pusher interface { Push(context.Context, *client.WriteRequest) (*client.WriteResponse, error) } -type appendable struct { + +type pusherAppender struct { pusher Pusher labels []labels.Labels samples []client.Sample userID string } -func (a *appendable) Add(l labels.Labels, t int64, v float64) (uint64, error) { +func (a *pusherAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { a.labels = append(a.labels, l) a.samples = append(a.samples, client.Sample{ TimestampMs: t, @@ -33,11 +37,11 @@ func (a *appendable) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil } -func (a *appendable) AddFast(_ uint64, _ int64, _ float64) error { +func (a *pusherAppender) AddFast(_ uint64, _ int64, _ float64) error { return storage.ErrNotFound } -func (a *appendable) Commit() error { +func (a *pusherAppender) Commit() error { // Since a.pusher is distributor, client.ReuseSlice will be called in a.pusher.Push. // We shouldn't call client.ReuseSlice here. _, err := a.pusher.Push(user.InjectOrgID(context.Background(), a.userID), client.ToWriteRequest(a.labels, a.samples, nil, client.RULE)) @@ -46,21 +50,21 @@ func (a *appendable) Commit() error { return err } -func (a *appendable) Rollback() error { +func (a *pusherAppender) Rollback() error { a.labels = nil a.samples = nil return nil } -// appender fulfills the storage.Appendable interface for prometheus manager -type appender struct { +// PusherAppendable fulfills the storage.Appendable interface for prometheus manager +type PusherAppendable struct { pusher Pusher userID string } // Appender returns a storage.Appender -func (t *appender) Appender() storage.Appender { - return &appendable{ +func (t *PusherAppendable) Appender() storage.Appender { + return &pusherAppender{ pusher: t.pusher, userID: t.userID, } @@ -74,3 +78,40 @@ func engineQueryFunc(engine *promql.Engine, q storage.Queryable, delay time.Dura return orig(ctx, qs, t.Add(-delay)) } } + +type ManagerFactory = func( + ctx context.Context, + userID string, + notifier *notifier.Manager, + logger log.Logger, + reg prometheus.Registerer, +) *rules.Manager + +func DefaultTenantManagerFactory( + cfg Config, + p Pusher, + q storage.Queryable, + engine *promql.Engine, +) ManagerFactory { + return func( + ctx context.Context, + userID string, + notifier *notifier.Manager, + logger log.Logger, + reg prometheus.Registerer, + ) *rules.Manager { + return rules.NewManager(&rules.ManagerOptions{ + Appendable: &PusherAppendable{pusher: p, userID: userID}, + Queryable: q, + QueryFunc: engineQueryFunc(engine, q, cfg.EvaluationDelay), + Context: user.InjectOrgID(ctx, userID), + ExternalURL: cfg.ExternalURL.URL, + NotifyFunc: SendAlerts(notifier, cfg.ExternalURL.URL.String()), + Logger: log.With(logger, "user", userID), + 
Registerer: reg, + OutageTolerance: cfg.OutageTolerance, + ForGracePeriod: cfg.ForGracePeriod, + ResendDelay: cfg.ResendDelay, + }) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go new file mode 100644 index 0000000000000..27aa32f654018 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go @@ -0,0 +1,149 @@ +package ruler + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/cortexproject/cortex/pkg/util" +) + +// ManagerMetrics aggregates metrics exported by the Prometheus +// rules package and returns them as Cortex metrics +type ManagerMetrics struct { + // Maps userID -> registry + regsMu sync.Mutex + regs map[string]*prometheus.Registry + + EvalDuration *prometheus.Desc + IterationDuration *prometheus.Desc + IterationsMissed *prometheus.Desc + IterationsScheduled *prometheus.Desc + EvalTotal *prometheus.Desc + EvalFailures *prometheus.Desc + GroupInterval *prometheus.Desc + GroupLastEvalTime *prometheus.Desc + GroupLastDuration *prometheus.Desc + GroupRules *prometheus.Desc +} + +// NewManagerMetrics returns a ManagerMetrics struct +func NewManagerMetrics() *ManagerMetrics { + return &ManagerMetrics{ + regs: map[string]*prometheus.Registry{}, + regsMu: sync.Mutex{}, + + EvalDuration: prometheus.NewDesc( + "cortex_prometheus_rule_evaluation_duration_seconds", + "The duration for a rule to execute.", + []string{"user"}, + nil, + ), + IterationDuration: prometheus.NewDesc( + "cortex_prometheus_rule_group_duration_seconds", + "The duration of rule group evaluations.", + []string{"user"}, + nil, + ), + IterationsMissed: prometheus.NewDesc( + "cortex_prometheus_rule_group_iterations_missed_total", + "The total number of rule group evaluations missed due to slow rule group evaluation.", + []string{"user"}, + nil, + ), + IterationsScheduled: prometheus.NewDesc( + "cortex_prometheus_rule_group_iterations_total", + "The total number of scheduled rule group evaluations, whether executed or missed.", + []string{"user"}, + nil, + ), + EvalTotal: prometheus.NewDesc( + "cortex_prometheus_rule_evaluations_total", + "The total number of rule evaluations.", + []string{"user", "rule_group"}, + nil, + ), + EvalFailures: prometheus.NewDesc( + "cortex_prometheus_rule_evaluation_failures_total", + "The total number of rule evaluation failures.", + []string{"user", "rule_group"}, + nil, + ), + GroupInterval: prometheus.NewDesc( + "cortex_prometheus_rule_group_interval_seconds", + "The interval of a rule group.", + []string{"user", "rule_group"}, + nil, + ), + GroupLastEvalTime: prometheus.NewDesc( + "cortex_prometheus_rule_group_last_evaluation_timestamp_seconds", + "The timestamp of the last rule group evaluation in seconds.", + []string{"user", "rule_group"}, + nil, + ), + GroupLastDuration: prometheus.NewDesc( + "cortex_prometheus_rule_group_last_duration_seconds", + "The duration of the last rule group evaluation.", + []string{"user", "rule_group"}, + nil, + ), + GroupRules: prometheus.NewDesc( + "cortex_prometheus_rule_group_rules", + "The number of rules.", + []string{"user", "rule_group"}, + nil, + ), + } +} + +// AddUserRegistry adds a Prometheus registry to the struct +func (m *ManagerMetrics) AddUserRegistry(user string, reg *prometheus.Registry) { + m.regsMu.Lock() + m.regs[user] = reg + m.regsMu.Unlock() +} + +// Registries returns a map of prometheus registries managed by the struct +func (m *ManagerMetrics) 
Registries() map[string]*prometheus.Registry { + regs := map[string]*prometheus.Registry{} + + m.regsMu.Lock() + defer m.regsMu.Unlock() + for uid, r := range m.regs { + regs[uid] = r + } + + return regs +} + +// Describe implements the Collector interface +func (m *ManagerMetrics) Describe(out chan<- *prometheus.Desc) { + out <- m.EvalDuration + out <- m.IterationDuration + out <- m.IterationsMissed + out <- m.IterationsScheduled + out <- m.EvalTotal + out <- m.EvalFailures + out <- m.GroupInterval + out <- m.GroupLastEvalTime + out <- m.GroupLastDuration + out <- m.GroupRules +} + +// Collect implements the Collector interface +func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) { + data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.Registries()) + + data.SendSumOfSummariesPerUser(out, m.EvalDuration, "prometheus_rule_evaluation_duration_seconds") + data.SendSumOfSummariesPerUser(out, m.IterationDuration, "cortex_prometheus_rule_group_duration_seconds") + + data.SendSumOfCountersPerUser(out, m.IterationsMissed, "prometheus_rule_group_iterations_missed_total") + data.SendSumOfCountersPerUser(out, m.IterationsScheduled, "prometheus_rule_group_iterations_total") + + data.SendSumOfCountersPerUserWithLabels(out, m.EvalTotal, "prometheus_rule_evaluations_total", "rule_group") + data.SendSumOfCountersPerUserWithLabels(out, m.EvalFailures, "prometheus_rule_evaluation_failures_total", "rule_group") + data.SendSumOfGaugesPerUserWithLabels(out, m.GroupInterval, "prometheus_rule_group_interval_seconds", "rule_group") + data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalTime, "prometheus_rule_group_last_evaluation_timestamp_seconds", "rule_group") + data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastDuration, "prometheus_rule_group_last_duration_seconds", "rule_group") + data.SendSumOfGaugesPerUserWithLabels(out, m.GroupRules, "prometheus_rule_group_rules", "rule_group") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index 5df91741c30bd..6d35943d1c015 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -20,9 +20,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/notifier" - "github.com/prometheus/prometheus/promql" promRules "github.com/prometheus/prometheus/rules" - promStorage "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/strutil" "github.com/weaveworks/common/user" "golang.org/x/net/context/ctxhttp" @@ -155,21 +153,22 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { type Ruler struct { services.Service - cfg Config - engine *promql.Engine - queryable promStorage.Queryable - pusher Pusher - alertURL *url.URL - notifierCfg *config.Config + cfg Config + notifierCfg *config.Config + managerFactory ManagerFactory lifecycler *ring.BasicLifecycler ring *ring.Ring subservices *services.Manager - store rules.RuleStore - mapper *mapper - userManagerMtx sync.Mutex - userManagers map[string]*promRules.Manager + store rules.RuleStore + mapper *mapper + + // Structs for holding per-user Prometheus rules Managers + // and a corresponding metrics struct + userManagerMtx sync.Mutex + userManagers map[string]*promRules.Manager + userManagerMetrics *ManagerMetrics // Per-user notifiers with separate queues. 
notifiersMtx sync.Mutex @@ -180,25 +179,29 @@ type Ruler struct { } // NewRuler creates a new ruler from a distributor and chunk store. -func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, pusher Pusher, reg prometheus.Registerer, logger log.Logger, ruleStore rules.RuleStore) (*Ruler, error) { +func NewRuler(cfg Config, managerFactory ManagerFactory, reg prometheus.Registerer, logger log.Logger, ruleStore rules.RuleStore) (*Ruler, error) { ncfg, err := buildNotifierConfig(&cfg) if err != nil { return nil, err } + userManagerMetrics := NewManagerMetrics() + + if reg != nil { + reg.MustRegister(userManagerMetrics) + } + ruler := &Ruler{ - cfg: cfg, - engine: engine, - queryable: queryable, - alertURL: cfg.ExternalURL.URL, - notifierCfg: ncfg, - notifiers: map[string]*rulerNotifier{}, - store: ruleStore, - pusher: pusher, - mapper: newMapper(cfg.RulePath, logger), - userManagers: map[string]*promRules.Manager{}, - registry: reg, - logger: logger, + cfg: cfg, + notifierCfg: ncfg, + managerFactory: managerFactory, + notifiers: map[string]*rulerNotifier{}, + store: ruleStore, + mapper: newMapper(cfg.RulePath, logger), + userManagers: map[string]*promRules.Manager{}, + userManagerMetrics: userManagerMetrics, + registry: reg, + logger: logger, } if cfg.EnableSharding { @@ -292,11 +295,11 @@ func (r *Ruler) stopping(_ error) error { return nil } -// sendAlerts implements a rules.NotifyFunc for a Notifier. +// SendAlerts implements a rules.NotifyFunc for a Notifier. // It filters any non-firing alerts from the input. // // Copied from Prometheus's main.go. -func sendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc { +func SendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc { return func(ctx context.Context, expr string, alerts ...*promRules.Alert) { var res []*notifier.Alert @@ -531,24 +534,13 @@ func (r *Ruler) newManager(ctx context.Context, userID string) (*promRules.Manag return nil, err } - // Wrap registerer with userID and cortex_ prefix - reg := prometheus.WrapRegistererWith(prometheus.Labels{"user": userID}, r.registry) - reg = prometheus.WrapRegistererWithPrefix("cortex_", reg) + // Create a new Prometheus registry and register it within + // our metrics struct for the provided user. 
+ reg := prometheus.NewRegistry() + r.userManagerMetrics.AddUserRegistry(userID, reg) + logger := log.With(r.logger, "user", userID) - opts := &promRules.ManagerOptions{ - Appendable: &appender{pusher: r.pusher, userID: userID}, - Queryable: r.queryable, - QueryFunc: engineQueryFunc(r.engine, r.queryable, r.cfg.EvaluationDelay), - Context: user.InjectOrgID(ctx, userID), - ExternalURL: r.alertURL, - NotifyFunc: sendAlerts(notifier, r.alertURL.String()), - Logger: logger, - Registerer: reg, - OutageTolerance: r.cfg.OutageTolerance, - ForGracePeriod: r.cfg.ForGracePeriod, - ResendDelay: r.cfg.ResendDelay, - } - return promRules.NewManager(opts), nil + return r.managerFactory(ctx, userID, notifier, logger, reg), nil } // GetRules retrieves the running rules from this ruler and all running rulers in the ring if diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go index 98e721160ac68..10343f522889a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go @@ -17,7 +17,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB Azure storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) + cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB Azure storage diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go index 4834b5fa23337..28dc109e2030a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go @@ -9,7 +9,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB filesystem storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) + cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB filesystem storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go index d46131ffadcdc..899e706dd91c5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go @@ -14,7 +14,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB GCS storage func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) + cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB GCS storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go index ddd93f3f7f793..f5f396cb0a8f2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go @@ -17,7 +17,7 @@ type Config struct { // RegisterFlags registers the flags for TSDB s3 storage with the provided prefix func (cfg *Config) RegisterFlags(f *flag.FlagSet) { - 
cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f) + cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f) } // RegisterFlagsWithPrefix registers the flags for TSDB s3 storage with the provided prefix diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go index 9d61dc276e870..cccbe16be840d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go @@ -14,7 +14,7 @@ import ( ) // NewBucketClient creates a new bucket client based on the configured backend -func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) { +func NewBucketClient(ctx context.Context, cfg BlocksStorageConfig, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) { switch cfg.Backend { case BackendS3: client, err = s3.NewBucketClient(cfg.S3, name, logger) diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 6049cf54e4fa9..61ff7e3e33ebc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -56,34 +56,18 @@ var ( errEmptyBlockranges = errors.New("empty block ranges for TSDB") ) -// Config holds the config information for TSDB storage -type Config struct { - Dir string `yaml:"dir"` - BlockRanges DurationList `yaml:"block_ranges_period"` - Retention time.Duration `yaml:"retention_period"` - ShipInterval time.Duration `yaml:"ship_interval"` - ShipConcurrency int `yaml:"ship_concurrency"` - Backend string `yaml:"backend"` - BucketStore BucketStoreConfig `yaml:"bucket_store"` - HeadCompactionInterval time.Duration `yaml:"head_compaction_interval"` - HeadCompactionConcurrency int `yaml:"head_compaction_concurrency"` - HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"` - StripeSize int `yaml:"stripe_size"` - WALCompressionEnabled bool `yaml:"wal_compression_enabled"` - FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"` - - // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup - MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"` +// BlocksStorageConfig holds the config information for the blocks storage. +//nolint:golint +type BlocksStorageConfig struct { + Backend string `yaml:"backend"` + BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the store-gateway synchronizes blocks stored in the bucket."` + TSDB TSDBConfig `yaml:"tsdb"` // Backends S3 s3.Config `yaml:"s3"` GCS gcs.Config `yaml:"gcs"` Azure azure.Config `yaml:"azure"` Filesystem filesystem.Config `yaml:"filesystem"` - - // If true, user TSDBs are not closed on shutdown. Only for testing. - // If false (default), user TSDBs are closed to make sure all resources are released and closed properly. 
- KeepUserTSDBOpenOnShutdown bool `yaml:"-"` } // DurationList is the block ranges for a tsdb @@ -124,38 +108,75 @@ func (d *DurationList) ToMilliseconds() []int64 { } // RegisterFlags registers the TSDB flags -func (cfg *Config) RegisterFlags(f *flag.FlagSet) { +func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet) { cfg.S3.RegisterFlags(f) cfg.GCS.RegisterFlags(f) cfg.Azure.RegisterFlags(f) cfg.BucketStore.RegisterFlags(f) cfg.Filesystem.RegisterFlags(f) + cfg.TSDB.RegisterFlags(f) - if len(cfg.BlockRanges) == 0 { - cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block - } - - f.StringVar(&cfg.Dir, "experimental.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") - f.Var(&cfg.BlockRanges, "experimental.tsdb.block-ranges-period", "TSDB blocks range period.") - f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.") - f.DurationVar(&cfg.ShipInterval, "experimental.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.") - f.IntVar(&cfg.ShipConcurrency, "experimental.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") - f.StringVar(&cfg.Backend, "experimental.tsdb.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) - f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup") - f.DurationVar(&cfg.HeadCompactionInterval, "experimental.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.") - f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") - f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.") - f.IntVar(&cfg.StripeSize, "experimental.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") - f.BoolVar(&cfg.WALCompressionEnabled, "experimental.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") - f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.tsdb.flush-blocks-on-shutdown", false, "If true, and transfer of blocks on shutdown fails or is disabled, incomplete blocks are flushed to storage instead. If false, incomplete blocks will be reused after restart, and uploaded when finished.") + f.StringVar(&cfg.Backend, "experimental.blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", "))) } // Validate the config. 
-func (cfg *Config) Validate() error { +func (cfg *BlocksStorageConfig) Validate() error { if !util.StringsContain(supportedBackends, cfg.Backend) { return errUnsupportedStorageBackend } + if err := cfg.TSDB.Validate(); err != nil { + return err + } + + return cfg.BucketStore.Validate() +} + +// TSDBConfig holds the config for TSDB opened in the ingesters. +//nolint:golint +type TSDBConfig struct { + Dir string `yaml:"dir"` + BlockRanges DurationList `yaml:"block_ranges_period"` + Retention time.Duration `yaml:"retention_period"` + ShipInterval time.Duration `yaml:"ship_interval"` + ShipConcurrency int `yaml:"ship_concurrency"` + HeadCompactionInterval time.Duration `yaml:"head_compaction_interval"` + HeadCompactionConcurrency int `yaml:"head_compaction_concurrency"` + HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"` + StripeSize int `yaml:"stripe_size"` + WALCompressionEnabled bool `yaml:"wal_compression_enabled"` + FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"` + + // MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup. + MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"` + + // If true, user TSDBs are not closed on shutdown. Only for testing. + // If false (default), user TSDBs are closed to make sure all resources are released and closed properly. + KeepUserTSDBOpenOnShutdown bool `yaml:"-"` +} + +// RegisterFlags registers the TSDBConfig flags. +func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { + if len(cfg.BlockRanges) == 0 { + cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block + } + + f.StringVar(&cfg.Dir, "experimental.blocks-storage.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.") + f.Var(&cfg.BlockRanges, "experimental.blocks-storage.tsdb.block-ranges-period", "TSDB blocks range period.") + f.DurationVar(&cfg.Retention, "experimental.blocks-storage.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.") + f.DurationVar(&cfg.ShipInterval, "experimental.blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.") + f.IntVar(&cfg.ShipConcurrency, "experimental.blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.") + f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup") + f.DurationVar(&cfg.HeadCompactionInterval, "experimental.blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.") + f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.blocks-storage.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block") + f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 
0 means disabled.") + f.IntVar(&cfg.StripeSize, "experimental.blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.") + f.BoolVar(&cfg.WALCompressionEnabled, "experimental.blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.") + f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.blocks-storage.tsdb.flush-blocks-on-shutdown", false, "If true, and transfer of blocks on shutdown fails or is disabled, incomplete blocks are flushed to storage instead. If false, incomplete blocks will be reused after restart, and uploaded when finished.") +} + +// Validate the config. +func (cfg *TSDBConfig) Validate() error { if cfg.ShipInterval > 0 && cfg.ShipConcurrency <= 0 { return errInvalidShipConcurrency } @@ -176,7 +197,13 @@ func (cfg *Config) Validate() error { return errEmptyBlockranges } - return cfg.BucketStore.Validate() + return nil +} + +// BlocksDir returns the directory path where TSDB blocks and wal should be +// stored by the ingester +func (cfg *TSDBConfig) BlocksDir(userID string) string { + return filepath.Join(cfg.Dir, userID) } // BucketStoreConfig holds the config information for Bucket Stores used by the querier @@ -204,22 +231,22 @@ type BucketStoreConfig struct { // RegisterFlags registers the BucketStore flags func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.") - cfg.ChunksCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.chunks-cache.") - cfg.MetadataCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.metadata-cache.") - - f.StringVar(&cfg.SyncDir, "experimental.tsdb.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") - f.DurationVar(&cfg.SyncInterval, "experimental.tsdb.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") - f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.tsdb.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") - f.IntVar(&cfg.MaxConcurrent, "experimental.tsdb.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.") - f.IntVar(&cfg.TenantSyncConcurrency, "experimental.tsdb.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.") - f.IntVar(&cfg.BlockSyncConcurrency, "experimental.tsdb.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.") - f.IntVar(&cfg.MetaSyncConcurrency, "experimental.tsdb.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") - f.DurationVar(&cfg.ConsistencyDelay, "experimental.tsdb.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. 
GCS and S3 are (roughly) strongly consistent.") - f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.tsdb.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ + cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.") + cfg.ChunksCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.chunks-cache.") + cfg.MetadataCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.metadata-cache.") + + f.StringVar(&cfg.SyncDir, "experimental.blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") + f.DurationVar(&cfg.SyncInterval, "experimental.blocks-storage.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") + f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.blocks-storage.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") + f.IntVar(&cfg.MaxConcurrent, "experimental.blocks-storage.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.") + f.IntVar(&cfg.TenantSyncConcurrency, "experimental.blocks-storage.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.") + f.IntVar(&cfg.BlockSyncConcurrency, "experimental.blocks-storage.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.") + f.IntVar(&cfg.MetaSyncConcurrency, "experimental.blocks-storage.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.") + f.DurationVar(&cfg.ConsistencyDelay, "experimental.blocks-storage.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.") + f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+ "The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+ "Default is 6h, half of the default value for -compactor.deletion-delay.") - f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.tsdb.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") + f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.") } // Validate the config. 
@@ -238,9 +265,3 @@ func (cfg *BucketStoreConfig) Validate() error { } return nil } - -// BlocksDir returns the directory path where TSDB blocks and wal should be -// stored by the ingester -func (cfg *Config) BlocksDir(userID string) string { - return filepath.Join(cfg.Dir, userID) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go index 6520d8dae86c0..dde7067be1f11 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go @@ -44,7 +44,7 @@ type IndexCacheConfig struct { } func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) { - cfg.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.") + cfg.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.") } func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go index 0c5447c135af7..27f625b24c77d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go @@ -2,11 +2,11 @@ package tsdb import ( "sync" - "sync/atomic" "time" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util" @@ -41,7 +41,7 @@ type refCacheStripe struct { type refCacheEntry struct { lbs labels.Labels ref uint64 - touchedAt int64 // Unix nano time. + touchedAt atomic.Int64 // Unix nano time. } // NewRefCache makes a new RefCache. @@ -93,7 +93,7 @@ func (s *refCacheStripe) ref(now time.Time, series labels.Labels, fp model.Finge for ix := range entries { if labels.Equal(entries[ix].lbs, series) { // Since we use read-only lock, we need to use atomic update. - atomic.StoreInt64(&entries[ix].touchedAt, now.UnixNano()) + entries[ix].touchedAt.Store(now.UnixNano()) return entries[ix].ref, true } } @@ -112,13 +112,15 @@ func (s *refCacheStripe) setRef(now time.Time, series labels.Labels, fp model.Fi } entry.ref = ref - entry.touchedAt = now.UnixNano() + entry.touchedAt.Store(now.UnixNano()) s.refs[fp][ix] = entry return } // The entry doesn't exist, so we have to add a new one. - s.refs[fp] = append(s.refs[fp], refCacheEntry{lbs: series, ref: ref, touchedAt: now.UnixNano()}) + refCacheEntry := refCacheEntry{lbs: series, ref: ref} + refCacheEntry.touchedAt.Store(now.UnixNano()) + s.refs[fp] = append(s.refs[fp], refCacheEntry) } func (s *refCacheStripe) purge(keepUntil time.Time) { @@ -131,7 +133,7 @@ func (s *refCacheStripe) purge(keepUntil time.Time) { // Since we do expect very few fingerprint collisions, we // have an optimized implementation for the common case. if len(entries) == 1 { - if entries[0].touchedAt < keepUntilNanos { + if entries[0].touchedAt.Load() < keepUntilNanos { delete(s.refs, fp) } @@ -141,7 +143,7 @@ func (s *refCacheStripe) purge(keepUntil time.Time) { // We have more entries, which means there's a collision, // so we have to iterate over the entries. for i := 0; i < len(entries); { - if entries[i].touchedAt < keepUntilNanos { + if entries[i].touchedAt.Load() < keepUntilNanos { entries = append(entries[:i], entries[i+1:]...) 
} else { i++ diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go index 853478c2517b8..096e17af1ca6c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go @@ -30,6 +30,7 @@ type BucketStoreMetrics struct { seriesMergeDuration *prometheus.Desc seriesRefetches *prometheus.Desc resultSeriesCount *prometheus.Desc + queriesDropped *prometheus.Desc cachedPostingsCompressions *prometheus.Desc cachedPostingsCompressionErrors *prometheus.Desc @@ -99,6 +100,10 @@ func NewBucketStoreMetrics() *BucketStoreMetrics { "cortex_bucket_store_series_result_series", "Number of series observed in the final result of a query.", nil, nil), + queriesDropped: prometheus.NewDesc( + "cortex_bucket_store_queries_dropped_total", + "Number of queries that were dropped due to the max chunks per query limit.", + nil, nil), cachedPostingsCompressions: prometheus.NewDesc( "cortex_bucket_store_cached_postings_compressions_total", @@ -156,6 +161,7 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.seriesMergeDuration out <- m.seriesRefetches out <- m.resultSeriesCount + out <- m.queriesDropped out <- m.cachedPostingsCompressions out <- m.cachedPostingsCompressionErrors @@ -184,10 +190,11 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfHistograms(out, m.seriesMergeDuration, "thanos_bucket_store_series_merge_duration_seconds") data.SendSumOfCounters(out, m.seriesRefetches, "thanos_bucket_store_series_refetches_total") data.SendSumOfSummaries(out, m.resultSeriesCount, "thanos_bucket_store_series_result_series") + data.SendSumOfCounters(out, m.queriesDropped, "thanos_bucket_store_queries_dropped_total") data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressions, "thanos_bucket_store_cached_postings_compressions_total", "op") data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionErrors, "thanos_bucket_store_cached_postings_compression_errors_total", "op") - data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionTimeSeconds, "thanos_bucket_store_cached_postings_compression_time_seconds", "op") + data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionTimeSeconds, "thanos_bucket_store_cached_postings_compression_time_seconds_total", "op") data.SendSumOfCountersWithLabels(out, m.cachedPostingsOriginalSizeBytes, "thanos_bucket_store_cached_postings_original_size_bytes_total") data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressedSizeBytes, "thanos_bucket_store_cached_postings_compressed_size_bytes_total") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go index 381434ca8af3d..8203130d92f36 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go @@ -29,12 +29,14 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/validation" ) // BucketStores is a multi-tenant wrapper of Thanos BucketStore. 
type BucketStores struct { logger log.Logger - cfg tsdb.Config + cfg tsdb.BlocksStorageConfig + limits *validation.Overrides bucket objstore.Bucket logLevel logging.Level bucketStoreMetrics *BucketStoreMetrics @@ -57,7 +59,7 @@ type BucketStores struct { } // NewBucketStores makes a new BucketStores. -func NewBucketStores(cfg tsdb.Config, filters []block.MetadataFilter, bucketClient objstore.Bucket, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) { +func NewBucketStores(cfg tsdb.BlocksStorageConfig, filters []block.MetadataFilter, bucketClient objstore.Bucket, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) { cachingBucket, err := tsdb.CreateCachingBucket(cfg.BucketStore.ChunksCache, cfg.BucketStore.MetadataCache, bucketClient, logger, reg) if err != nil { return nil, errors.Wrapf(err, "create caching bucket") @@ -74,6 +76,7 @@ func NewBucketStores(cfg tsdb.Config, filters []block.MetadataFilter, bucketClie u := &BucketStores{ logger: logger, cfg: cfg, + limits: limits, bucket: cachingBucket, filters: filters, stores: map[string]*store.BucketStore{}, @@ -283,7 +286,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro u.indexCache, u.queryGate, u.cfg.BucketStore.MaxChunkPoolBytes, - 0, // No max samples limit (it's flawed in Thanos) + newChunksLimiterFactory(u.limits, userID), u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug u.cfg.BucketStore.BlockSyncConcurrency, nil, // Do not limit timerange. @@ -353,3 +356,11 @@ type spanSeriesServer struct { func (s spanSeriesServer) Context() context.Context { return s.ctx } + +func newChunksLimiterFactory(limits *validation.Overrides, userID string) store.ChunksLimiterFactory { + return func(failedCounter prometheus.Counter) store.ChunksLimiter { + // Since limit overrides could be live reloaded, we have to get the current user's limit + // each time a new limiter is instantiated. 
+ return store.NewLimiter(uint64(limits.MaxChunksPerQuery(userID)), failedCounter) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go index 8ce68c660a6c1..4182a9878981b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -22,6 +22,7 @@ import ( "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/validation" ) const ( @@ -58,7 +59,7 @@ type StoreGateway struct { services.Service gatewayCfg Config - storageCfg cortex_tsdb.Config + storageCfg cortex_tsdb.BlocksStorageConfig logger log.Logger stores *BucketStores @@ -73,7 +74,7 @@ type StoreGateway struct { bucketSync *prometheus.CounterVec } -func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { +func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { var ringStore kv.Client bucketClient, err := createBucketClient(storageCfg, logger, reg) @@ -92,10 +93,10 @@ func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, logLevel } } - return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, logLevel, logger, reg) + return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, limits, logLevel, logger, reg) } -func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, bucketClient objstore.Bucket, ringStore kv.Client, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { +func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, bucketClient objstore.Bucket, ringStore kv.Client, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) { var err error var filters []block.MetadataFilter @@ -147,7 +148,7 @@ func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, bucketCli filters = append(filters, NewShardingMetadataFilter(g.ring, lifecyclerCfg.Addr, logger)) } - g.stores, err = NewBucketStores(storageCfg, filters, bucketClient, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "store-gateway"}, reg)) + g.stores, err = NewBucketStores(storageCfg, filters, bucketClient, limits, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "store-gateway"}, reg)) if err != nil { return nil, errors.Wrap(err, "create bucket stores") } @@ -305,7 +306,7 @@ func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { } -func createBucketClient(cfg cortex_tsdb.Config, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { +func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg, "store-gateway", logger, reg) if err != nil { return nil, errors.Wrap(err, "create bucket client") diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/util/events.go b/vendor/github.com/cortexproject/cortex/pkg/util/events.go index ec96106be07e1..dba9ec30df3bf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/events.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/events.go @@ -2,9 +2,9 @@ package util import ( "os" - "sync/atomic" "github.com/go-kit/kit/log" + "go.uber.org/atomic" ) // Provide an "event" interface for observability @@ -43,11 +43,11 @@ func newEventLogger(freq int) log.Logger { type samplingFilter struct { next log.Logger freq int - count int64 + count atomic.Int64 } func (e *samplingFilter) Log(keyvals ...interface{}) error { - count := atomic.AddInt64(&e.count, 1) + count := e.count.Inc() if count%int64(e.freq) == 0 { return e.next.Log(keyvals...) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy/snappy.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy/snappy.go new file mode 100644 index 0000000000000..fe01b4ca3511c --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy/snappy.go @@ -0,0 +1,87 @@ +package snappy + +import ( + "io" + "sync" + + "github.com/golang/snappy" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the snappy compressor. +const Name = "snappy" + +func init() { + encoding.RegisterCompressor(newCompressor()) +} + +type compressor struct { + writersPool sync.Pool + readersPool sync.Pool +} + +func newCompressor() *compressor { + c := &compressor{} + c.readersPool = sync.Pool{ + New: func() interface{} { + return snappy.NewReader(nil) + }, + } + c.writersPool = sync.Pool{ + New: func() interface{} { + return snappy.NewBufferedWriter(nil) + }, + } + return c +} + +func (c *compressor) Name() string { + return Name +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + wr := c.writersPool.Get().(*snappy.Writer) + wr.Reset(w) + return writeCloser{wr, &c.writersPool}, nil +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + dr := c.readersPool.Get().(*snappy.Reader) + dr.Reset(r) + return reader{dr, &c.readersPool}, nil +} + +type writeCloser struct { + writer *snappy.Writer + pool *sync.Pool +} + +func (w writeCloser) Write(p []byte) (n int, err error) { + return w.writer.Write(p) +} + +func (w writeCloser) Close() error { + defer func() { + w.writer.Reset(nil) + w.pool.Put(w.writer) + }() + + if w.writer != nil { + return w.writer.Close() + } + return nil +} + +type reader struct { + reader *snappy.Reader + pool *sync.Pool +} + +func (r reader) Read(p []byte) (n int, err error) { + n, err = r.reader.Read(p) + if err == io.EOF { + r.reader.Reset(nil) + r.pool.Put(r.reader) + } + return n, err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go index f2ef20e9bde9a..8a73616946b52 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -3,10 +3,16 @@ package grpcclient import ( "flag" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" + "github.com/pkg/errors" "google.golang.org/grpc" + "google.golang.org/grpc/encoding/gzip" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy" 
"github.com/cortexproject/cortex/pkg/util/tls" ) @@ -14,7 +20,8 @@ import ( type Config struct { MaxRecvMsgSize int `yaml:"max_recv_msg_size"` MaxSendMsgSize int `yaml:"max_send_msg_size"` - UseGzipCompression bool `yaml:"use_gzip_compression"` + UseGzipCompression bool `yaml:"use_gzip_compression"` // TODO: Remove this deprecated option in v1.6.0. + GRPCCompression string `yaml:"grpc_compression"` RateLimit float64 `yaml:"rate_limit"` RateLimitBurst int `yaml:"rate_limit_burst"` @@ -31,7 +38,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") - f.BoolVar(&cfg.UseGzipCompression, prefix+".grpc-use-gzip-compression", false, "Use compression when sending messages.") + f.BoolVar(&cfg.UseGzipCompression, prefix+".grpc-use-gzip-compression", false, "Deprecated: Use gzip compression when sending messages. If true, overrides grpc-compression flag.") + f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. Supported values are: 'gzip', 'snappy' and '' (disable compression)") f.Float64Var(&cfg.RateLimit, prefix+".grpc-client-rate-limit", 0., "Rate limit for gRPC client; 0 means disabled.") f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.") f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.") @@ -39,13 +47,31 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.BackoffConfig.RegisterFlags(prefix, f) } +func (cfg *Config) Validate(log log.Logger) error { + if cfg.UseGzipCompression { + flagext.DeprecatedFlagsUsed.Inc() + level.Warn(log).Log("msg", "running with DEPRECATED option use_gzip_compression, use grpc_compression instead.") + } + switch cfg.GRPCCompression { + case gzip.Name, snappy.Name, "": + // valid + default: + return errors.Errorf("unsupported compression type: %s", cfg.GRPCCompression) + } + return nil +} + // CallOptions returns the config in terms of CallOptions. 
func (cfg *Config) CallOptions() []grpc.CallOption { var opts []grpc.CallOption opts = append(opts, grpc.MaxCallRecvMsgSize(cfg.MaxRecvMsgSize)) opts = append(opts, grpc.MaxCallSendMsgSize(cfg.MaxSendMsgSize)) + compression := cfg.GRPCCompression if cfg.UseGzipCompression { - opts = append(opts, grpc.UseCompressor("gzip")) + compression = gzip.Name + } + if compression != "" { + opts = append(opts, grpc.UseCompressor(compression)) } return opts } @@ -79,6 +105,10 @@ func (cfg *ConfigWithTLS) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet cfg.TLS.RegisterFlagsWithPrefix(prefix, f) } +func (cfg *ConfigWithTLS) Validate(log log.Logger) error { + return cfg.GRPC.Validate(log) +} + // DialOption returns the config as a grpc.DialOptions func (cfg *ConfigWithTLS) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) { opts, err := cfg.TLS.GetGRPCDialOptions() diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index 0dabf741fc549..20c6119675302 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -166,6 +166,17 @@ func (d MetricFamiliesPerUser) SendSumOfCountersPerUser(out chan<- prometheus.Me } } +// SendSumOfCountersPerUserWithLabels provides metrics with the provided label names on a per-user basis. This function assumes that `user` is the +// first label on the provided metric Desc +func (d MetricFamiliesPerUser) SendSumOfCountersPerUserWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, metric string, labelNames ...string) { + for user, userMetrics := range d { + result := singleValueWithLabelsMap{} + userMetrics.sumOfSingleValuesWithLabels(metric, labelNames, counterValue, result.aggregateFn) + result.prependUserLabelValue(user) + result.WriteToMetricChannel(out, desc, prometheus.CounterValue) + } +} + func (d MetricFamiliesPerUser) GetSumOfGauges(gauge string) float64 { result := float64(0) for _, userMetrics := range d { @@ -254,6 +265,13 @@ func (d MetricFamiliesPerUser) SendSumOfSummariesWithLabels(out chan<- prometheu } } +func (d MetricFamiliesPerUser) SendSumOfSummariesPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, summaryName string) { + for user, userMetrics := range d { + data := userMetrics.SumSummaries(summaryName) + out <- data.Metric(desc, user) + } +} + func (d MetricFamiliesPerUser) SendSumOfHistograms(out chan<- prometheus.Metric, desc *prometheus.Desc, histogramName string) { hd := HistogramData{} for _, userMetrics := range d { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 62a9029989189..2a7a38aa88635 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -93,17 +93,17 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster. 0 to disable. 
Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster. 0 to disable.") - f.IntVar(&l.MinChunkLength, "ingester.min-chunk-length", 0, "Minimum number of samples in an idle chunk to flush it to the store. Use with care, if chunks are less than this size they will be discarded. This option is ignored when running the Cortex blocks storage.") + f.IntVar(&l.MinChunkLength, "ingester.min-chunk-length", 0, "Minimum number of samples in an idle chunk to flush it to the store. Use with care, if chunks are less than this size they will be discarded. This option is ignored when running the Cortex blocks storage. 0 to disable.") f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.") f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.") f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.") f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") - f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage.") + f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage. When running the Cortex chunks storage, this limit is enforced in the querier, while when running the Cortex blocks storage this limit is both enforced in the querier and store-gateway. 0 to disable.") f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.") f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.") - f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage.") + f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage. 0 to disable.") f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.") f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides. 
[deprecated, use -runtime-config.file instead]") diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 3b071a0695f5e..bd83f6e2fbc27 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -31,6 +31,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/runutil" "golang.org/x/sync/errgroup" + "gopkg.in/yaml.v2" ) type fetcherMetrics struct { @@ -682,6 +683,10 @@ func NewReplicaLabelRemover(logger log.Logger, replicaLabels []string) *ReplicaL // Modify modifies external labels of existing blocks, it removes given replica labels from the metadata of blocks that have it. func (r *ReplicaLabelRemover) Modify(_ context.Context, metas map[ulid.ULID]*metadata.Meta, modified *extprom.TxGaugeVec) error { + if len(r.replicaLabels) == 0 { + return nil + } + for u, meta := range metas { l := meta.Thanos.Labels for _, replicaLabel := range r.replicaLabels { @@ -795,3 +800,20 @@ func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.UL } return nil } + +// ParseRelabelConfig parses relabel configuration. +func ParseRelabelConfig(contentYaml []byte) ([]*relabel.Config, error) { + var relabelConfig []*relabel.Config + if err := yaml.Unmarshal(contentYaml, &relabelConfig); err != nil { + return nil, errors.Wrap(err, "parsing relabel configuration") + } + supportedActions := map[relabel.Action]struct{}{relabel.Keep: {}, relabel.Drop: {}, relabel.HashMod: {}} + + for _, cfg := range relabelConfig { + if _, ok := supportedActions[cfg.Action]; !ok { + return nil, errors.Errorf("unsupported relabel action: %v", cfg.Action) + } + } + + return relabelConfig, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go index 16e528d65e93c..ca1dff1d7343c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go +++ b/vendor/github.com/thanos-io/thanos/pkg/cacheutil/memcached_client.go @@ -24,9 +24,10 @@ import ( ) const ( - opSet = "set" - opGetMulti = "getmulti" - reasonMaxItemSize = "max-item-size" + opSet = "set" + opGetMulti = "getmulti" + reasonMaxItemSize = "max-item-size" + reasonAsyncBufferFull = "async-buffer-full" ) var ( @@ -233,6 +234,7 @@ func newMemcachedClient( }, []string{"operation", "reason"}) c.skipped.WithLabelValues(opGetMulti, reasonMaxItemSize) c.skipped.WithLabelValues(opSet, reasonMaxItemSize) + c.skipped.WithLabelValues(opSet, reasonAsyncBufferFull) c.duration = promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "thanos_memcached_operation_duration_seconds", @@ -276,7 +278,7 @@ func (c *memcachedClient) SetAsync(_ context.Context, key string, value []byte, return nil } - return c.enqueueAsync(func() { + err := c.enqueueAsync(func() { start := time.Now() c.operations.WithLabelValues(opSet).Inc() @@ -297,6 +299,13 @@ func (c *memcachedClient) SetAsync(_ context.Context, key string, value []byte, c.duration.WithLabelValues(opSet).Observe(time.Since(start).Seconds()) }) + + if err == errMemcachedAsyncBufferFull { + c.skipped.WithLabelValues(opSet, reasonAsyncBufferFull).Inc() + level.Debug(c.logger).Log("msg", "failed to store item to memcached because the async buffer is full", "err", err, "size", len(c.asyncQueue)) + return nil + } + return err } func (c *memcachedClient) GetMulti(ctx context.Context, keys []string) map[string][]byte { diff 
--git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index b034248fbe4d4..562a4ca336e44 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" terrors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/compact/downsample" @@ -118,7 +119,7 @@ func UntilNextDownsampling(m *metadata.Meta) (time.Duration, error) { } } -// SyncMetas synchronises local state of block metas with what we have in the bucket. +// SyncMetas synchronizes local state of block metas with what we have in the bucket. func (s *Syncer) SyncMetas(ctx context.Context) error { s.mtx.Lock() defer s.mtx.Unlock() diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 153f82769ede4..e80f7aac49f50 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" tsdberrors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns" "github.com/thanos-io/thanos/pkg/extprom" ) @@ -53,7 +54,7 @@ func (t ResolverType) ToResolver(logger log.Logger) ipLookupResolver { } // NewProvider returns a new empty provider with a given resolver type. -// If empty resolver type is net.DefaultResolver.w +// If empty resolver type is net.DefaultResolver. func NewProvider(logger log.Logger, reg prometheus.Registerer, resolverType ResolverType) *Provider { p := &Provider{ resolver: NewResolver(resolverType.ToResolver(logger)), diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go index 4a75df0d5ca66..a87e3af5aa7a5 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go +++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go @@ -12,7 +12,7 @@ import ( promgate "github.com/prometheus/prometheus/pkg/gate" ) -// Gate is an interface that mimics prometheus/pkg/gate behaviour. +// Gate is an interface that mimics prometheus/pkg/gate behavior. type Gate interface { Start(ctx context.Context) error Done() diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go index c11715ce4fa4e..6214579c68376 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go @@ -19,8 +19,9 @@ import ( "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" "github.com/gophercloud/gophercloud/pagination" "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" "gopkg.in/yaml.v2" + + "github.com/thanos-io/thanos/pkg/objstore" ) // DirDelim is the delimiter used to model a directory structure in an object store bucket. 
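The swift.go hunks that follow change GetRange to emit open-ended HTTP Range headers instead of always computing both bounds. A standalone sketch of that formatting logic, under the assumption that a negative offset or non-positive length means "leave that end open"; the helper name is hypothetical:

package main

import "fmt"

// rangeHeader mirrors the GetRange change below: either end of the byte
// range is left open when off < 0 or length <= 0.
func rangeHeader(off, length int64) string {
	lower, upper := "", ""
	if off >= 0 {
		lower = fmt.Sprintf("%d", off)
	}
	if length > 0 {
		upper = fmt.Sprintf("%d", off+length-1)
	}
	return fmt.Sprintf("bytes=%s-%s", lower, upper)
}

func main() {
	fmt.Println(rangeHeader(100, 50)) // "bytes=100-149"
	fmt.Println(rangeHeader(0, -1))   // "bytes=0-" (read to the end of the object)
}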
@@ -55,12 +56,7 @@ func NewContainer(logger log.Logger, conf []byte) (*Container, error) { return nil, err } - authOpts, err := authOptsFromConfig(sc) - if err != nil { - return nil, err - } - - provider, err := openstack.AuthenticatedClient(authOpts) + provider, err := openstack.AuthenticatedClient(authOptsFromConfig(sc)) if err != nil { return nil, err } @@ -93,7 +89,7 @@ func (c *Container) Iter(ctx context.Context, dir string, f func(string) error) dir = strings.TrimSuffix(dir, DirDelim) + DirDelim } - options := &objects.ListOpts{Full: false, Prefix: dir, Delimiter: DirDelim} + options := &objects.ListOpts{Full: true, Prefix: dir, Delimiter: DirDelim} return objects.List(c.client, c.name, options).EachPage(func(page pagination.Page) (bool, error) { objectNames, err := objects.ExtractNames(page) if err != nil { @@ -120,9 +116,17 @@ func (c *Container) Get(ctx context.Context, name string) (io.ReadCloser, error) // GetRange returns a new range reader for the given object name and range. func (c *Container) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + lowerLimit := "" + upperLimit := "" + if off >= 0 { + lowerLimit = fmt.Sprintf("%d", off) + } + if length > 0 { + upperLimit = fmt.Sprintf("%d", off+length-1) + } options := objects.DownloadOpts{ Newest: true, - Range: fmt.Sprintf("bytes=%d-%d", off, off+length-1), + Range: fmt.Sprintf("bytes=%s-%s", lowerLimit, upperLimit), } response := objects.Download(c.client, c.name, name, options) return response.Body, response.Err @@ -185,7 +189,7 @@ func parseConfig(conf []byte) (*SwiftConfig, error) { return &sc, err } -func authOptsFromConfig(sc *SwiftConfig) (gophercloud.AuthOptions, error) { +func authOptsFromConfig(sc *SwiftConfig) gophercloud.AuthOptions { authOpts := gophercloud.AuthOptions{ IdentityEndpoint: sc.AuthUrl, Username: sc.Username, @@ -229,7 +233,7 @@ func authOptsFromConfig(sc *SwiftConfig) (gophercloud.AuthOptions, error) { authOpts.Scope.ProjectID = sc.ProjectID } } - return authOpts, nil + return authOpts } func (c *Container) createContainer(name string) error { @@ -251,7 +255,7 @@ func configFromEnv() SwiftConfig { ProjectName: os.Getenv("OS_PROJECT_NAME"), UserDomainID: os.Getenv("OS_USER_DOMAIN_ID"), UserDomainName: os.Getenv("OS_USER_DOMAIN_NAME"), - ProjectDomainID: os.Getenv("OS_PROJET_DOMAIN_ID"), + ProjectDomainID: os.Getenv("OS_PROJECT_DOMAIN_ID"), ProjectDomainName: os.Getenv("OS_PROJECT_DOMAIN_NAME"), } diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go index 98205960e468c..2d49ae72ce314 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go @@ -177,6 +177,15 @@ func (r1 *Rule) Compare(r2 *Rule) int { return 0 } +func (r *RuleGroups) MarshalJSON() ([]byte, error) { + if r.Groups == nil { + // Ensure that empty slices are marshaled as '[]' and not 'null'. + return []byte(`{"groups":[]}`), nil + } + type plain RuleGroups + return json.Marshal((*plain)(r)) +} + func (m *Rule) UnmarshalJSON(entry []byte) error { decider := struct { Type string `json:"type"` @@ -219,6 +228,10 @@ func (m *Rule) MarshalJSON() ([]byte, error) { }) } a := m.GetAlert() + if a.Alerts == nil { + // Ensure that empty slices are marshaled as '[]' and not 'null'. 
+ a.Alerts = make([]*AlertInstance, 0) + } return json.Marshal(struct { *Alert Type string `json:"type"` @@ -247,7 +260,7 @@ func (x *AlertState) UnmarshalJSON(entry []byte) error { } func (x *AlertState) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(x.String())), nil + return []byte(strconv.Quote(strings.ToLower(x.String()))), nil } // Compare compares alert state x and y and returns: diff --git a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go index 5c961f4459fc8..30496e43c0455 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go +++ b/vendor/github.com/thanos-io/thanos/pkg/shipper/shipper.go @@ -83,8 +83,9 @@ type Shipper struct { allowOutOfOrderUploads bool } -// New creates a new shipper that detects new TSDB blocks in dir and uploads them -// to remote if necessary. It attaches the Thanos metadata section in each meta JSON file. +// New creates a new shipper that detects new TSDB blocks in dir and uploads them to +// remote if necessary. It attaches the Thanos metadata section in each meta JSON file. +// If uploadCompacted is enabled, it also uploads compacted blocks which are already in filesystem. func New( logger log.Logger, r prometheus.Registerer, @@ -92,6 +93,7 @@ func New( bucket objstore.Bucket, lbls func() labels.Labels, source metadata.SourceType, + uploadCompacted bool, allowOutOfOrderUploads bool, ) *Shipper { if logger == nil { @@ -106,40 +108,10 @@ func New( dir: dir, bucket: bucket, labels: lbls, - metrics: newMetrics(r, false), + metrics: newMetrics(r, uploadCompacted), source: source, allowOutOfOrderUploads: allowOutOfOrderUploads, - } -} - -// NewWithCompacted creates a new shipper that detects new TSDB blocks in dir and uploads them -// to remote if necessary, including compacted blocks which are already in filesystem. -// It attaches the Thanos metadata section in each meta JSON file. -func NewWithCompacted( - logger log.Logger, - r prometheus.Registerer, - dir string, - bucket objstore.Bucket, - lbls func() labels.Labels, - source metadata.SourceType, - allowOutOfOrderUploads bool, -) *Shipper { - if logger == nil { - logger = log.NewNopLogger() - } - if lbls == nil { - lbls = func() labels.Labels { return nil } - } - - return &Shipper{ - logger: logger, - dir: dir, - bucket: bucket, - labels: lbls, - metrics: newMetrics(r, true), - source: source, - uploadCompacted: true, - allowOutOfOrderUploads: allowOutOfOrderUploads, + uploadCompacted: uploadCompacted, } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 33e9d68918019..f511149b0beab 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -54,13 +54,13 @@ import ( ) const ( - // maxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk. This is needed + // MaxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk. This is needed // for precalculating the number of samples that we may have to retrieve and decode for any given query // without downloading them. Please take a look at https://github.com/prometheus/tsdb/pull/397 to know // where this number comes from. Long story short: TSDB is made in such a way, and it is made in such a way // because you barely get any improvements in compression when the number of samples is beyond this. 
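Since NewWithCompacted is removed above in favour of an uploadCompacted argument on New, here is a hedged migration sketch for a call site; the bucket, directory and source values are placeholders, not taken from this diff:

package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/pkg/labels"

	"github.com/thanos-io/thanos/pkg/block/metadata"
	"github.com/thanos-io/thanos/pkg/objstore"
	"github.com/thanos-io/thanos/pkg/shipper"
)

// newShipper shows the migrated call: uploadCompacted is now the seventh
// argument instead of a dedicated NewWithCompacted constructor.
func newShipper(bkt objstore.Bucket, dir string) *shipper.Shipper {
	return shipper.New(
		log.NewNopLogger(),
		prometheus.NewRegistry(),
		dir,
		bkt,
		func() labels.Labels { return nil },
		metadata.SidecarSource,
		true,  // uploadCompacted: previously implied by NewWithCompacted
		false, // allowOutOfOrderUploads
	)
}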
// Take a look at Figure 6 in this whitepaper http://www.vldb.org/pvldb/vol8/p1816-teller.pdf. - maxSamplesPerChunk = 120 + MaxSamplesPerChunk = 120 maxChunkSize = 16000 maxSeriesSize = 64 * 1024 @@ -82,6 +82,10 @@ const ( DefaultPostingOffsetInMemorySampling = 32 partitionerMaxGapSize = 512 * 1024 + + // Labels for metrics. + labelEncode = "encode" + labelDecode = "decode" ) type bucketStoreMetrics struct { @@ -191,14 +195,23 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { Name: "thanos_bucket_store_cached_postings_compressions_total", Help: "Number of postings compressions before storing to index cache.", }, []string{"op"}) + m.cachedPostingsCompressions.WithLabelValues(labelEncode) + m.cachedPostingsCompressions.WithLabelValues(labelDecode) + m.cachedPostingsCompressionErrors = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_compression_errors_total", Help: "Number of postings compression errors.", }, []string{"op"}) + m.cachedPostingsCompressionErrors.WithLabelValues(labelEncode) + m.cachedPostingsCompressionErrors.WithLabelValues(labelDecode) + m.cachedPostingsCompressionTimeSeconds = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_bucket_store_cached_postings_compression_time_seconds", + Name: "thanos_bucket_store_cached_postings_compression_time_seconds_total", Help: "Time spent compressing postings before storing them into postings cache.", }, []string{"op"}) + m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode) + m.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode) + m.cachedPostingsOriginalSizeBytes = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_cached_postings_original_size_bytes_total", Help: "Original size of postings stored into cache.", @@ -240,9 +253,9 @@ type BucketStore struct { // Query gate which limits the maximum amount of concurrent queries. queryGate gate.Gate - // samplesLimiter limits the number of samples per each Series() call. - samplesLimiter SampleLimiter - partitioner partitioner + // chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call. 
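The WithLabelValues calls added above pre-create the encode/decode label combinations so the counter series are exported at zero before the first compression happens. A generic sketch of that pattern, using an illustrative metric name rather than one of the real Thanos metrics:

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// newCompressionMetrics registers a CounterVec and touches the known label
// values up front, so both series appear (at 0) on first scrape.
func newCompressionMetrics(reg prometheus.Registerer) *prometheus.CounterVec {
	c := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
		Name: "example_cached_postings_compressions_total",
		Help: "Number of postings compressions before storing to index cache (placeholder).",
	}, []string{"op"})
	c.WithLabelValues("encode")
	c.WithLabelValues("decode")
	return c
}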
+ chunksLimiterFactory ChunksLimiterFactory + partitioner partitioner filterConfig *FilterConfig advLabelSets []storepb.LabelSet @@ -269,7 +282,7 @@ func NewBucketStore( indexCache storecache.IndexCache, queryGate gate.Gate, maxChunkPoolBytes uint64, - maxSampleCount uint64, + chunksLimiterFactory ChunksLimiterFactory, debugLogging bool, blockSyncConcurrency int, filterConfig *FilterConfig, @@ -287,7 +300,6 @@ func NewBucketStore( return nil, errors.Wrap(err, "create chunk pool") } - metrics := newBucketStoreMetrics(reg) s := &BucketStore{ logger: logger, bkt: bkt, @@ -301,14 +313,14 @@ func NewBucketStore( blockSyncConcurrency: blockSyncConcurrency, filterConfig: filterConfig, queryGate: queryGate, - samplesLimiter: NewLimiter(maxSampleCount, metrics.queriesDropped), + chunksLimiterFactory: chunksLimiterFactory, partitioner: gapBasedPartitioner{maxGapSize: partitionerMaxGapSize}, enableCompatibilityLabel: enableCompatibilityLabel, enablePostingsCompression: enablePostingsCompression, postingOffsetsInMemSampling: postingOffsetsInMemSampling, enableSeriesResponseHints: enableSeriesResponseHints, + metrics: newBucketStoreMetrics(reg), } - s.metrics = metrics if err := os.MkdirAll(dir, 0777); err != nil { return nil, errors.Wrap(err, "create dir") @@ -649,7 +661,7 @@ func blockSeries( chunkr *bucketChunkReader, matchers []*labels.Matcher, req *storepb.SeriesRequest, - samplesLimiter SampleLimiter, + chunksLimiter ChunksLimiter, ) (storepb.SeriesSet, *queryStats, error) { ps, err := indexr.ExpandedPostings(matchers) if err != nil { @@ -722,12 +734,16 @@ func blockSeries( s.refs = append(s.refs, meta.Ref) } if len(s.chks) > 0 { + if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { + return nil, nil, errors.Wrap(err, "exceeded chunks limit") + } + res = append(res, s) } } // Preload all chunks that were marked in the previous stage. 
- if err := chunkr.preload(samplesLimiter); err != nil { + if err := chunkr.preload(); err != nil { return nil, nil, errors.Wrap(err, "preload chunks") } @@ -858,6 +874,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie g, gctx = errgroup.WithContext(ctx) resHints = &hintspb.SeriesResponseHints{} reqBlockMatchers []*labels.Matcher + chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped) ) if req.Hints != nil { @@ -909,7 +926,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie chunkr, blockMatchers, req, - s.samplesLimiter, + chunksLimiter, ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) @@ -941,12 +958,12 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie s.metrics.seriesDataSizeTouched.WithLabelValues("chunks").Observe(float64(stats.chunksTouchedSizeSum)) s.metrics.seriesDataSizeFetched.WithLabelValues("chunks").Observe(float64(stats.chunksFetchedSizeSum)) s.metrics.resultSeriesCount.Observe(float64(stats.mergedSeriesCount)) - s.metrics.cachedPostingsCompressions.WithLabelValues("encode").Add(float64(stats.cachedPostingsCompressions)) - s.metrics.cachedPostingsCompressions.WithLabelValues("decode").Add(float64(stats.cachedPostingsDecompressions)) - s.metrics.cachedPostingsCompressionErrors.WithLabelValues("encode").Add(float64(stats.cachedPostingsCompressionErrors)) - s.metrics.cachedPostingsCompressionErrors.WithLabelValues("decode").Add(float64(stats.cachedPostingsDecompressionErrors)) - s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues("encode").Add(stats.cachedPostingsCompressionTimeSum.Seconds()) - s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues("decode").Add(stats.cachedPostingsDecompressionTimeSum.Seconds()) + s.metrics.cachedPostingsCompressions.WithLabelValues(labelEncode).Add(float64(stats.cachedPostingsCompressions)) + s.metrics.cachedPostingsCompressions.WithLabelValues(labelDecode).Add(float64(stats.cachedPostingsDecompressions)) + s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelEncode).Add(float64(stats.cachedPostingsCompressionErrors)) + s.metrics.cachedPostingsCompressionErrors.WithLabelValues(labelDecode).Add(float64(stats.cachedPostingsDecompressionErrors)) + s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelEncode).Add(stats.cachedPostingsCompressionTimeSum.Seconds()) + s.metrics.cachedPostingsCompressionTimeSeconds.WithLabelValues(labelDecode).Add(stats.cachedPostingsDecompressionTimeSum.Seconds()) s.metrics.cachedPostingsOriginalSizeBytes.Add(float64(stats.cachedPostingsOriginalSizeSum)) s.metrics.cachedPostingsCompressedSizeBytes.Add(float64(stats.cachedPostingsCompressedSizeSum)) @@ -1705,7 +1722,8 @@ func (r *bucketIndexReader) fetchPostings(keys []labels.Label) ([]index.Postings // Errors from corrupted postings will be reported when postings are used. compressions++ s := time.Now() - data, err := diffVarintSnappyEncode(newBigEndianPostings(pBytes[4:])) + bep := newBigEndianPostings(pBytes[4:]) + data, err := diffVarintSnappyEncode(bep, bep.length()) compressionTime = time.Since(s) if err == nil { dataToCache = data @@ -1803,6 +1821,11 @@ func (it *bigEndianPostings) Err() error { return nil } +// Returns number of remaining postings values. 
+func (it *bigEndianPostings) length() int { + return len(it.list) / 4 +} + func (r *bucketIndexReader) PreloadSeries(ids []uint64) error { // Load series from cache, overwriting the list of ids to preload // with the missing ones. @@ -1943,14 +1966,15 @@ func (r *bucketIndexReader) Close() error { type bucketChunkReader struct { ctx context.Context block *bucketBlock - stats *queryStats preloads [][]uint32 - mtx sync.Mutex - chunks map[uint64]chunkenc.Chunk - // Byte slice to return to the chunk pool on close. - chunkBytes []*[]byte + // Mutex protects access to following fields, when updated from chunks-loading goroutines. + // After chunks are loaded, mutex is no longer used. + mtx sync.Mutex + chunks map[uint64]chunkenc.Chunk + stats *queryStats + chunkBytes []*[]byte // Byte slice to return to the chunk pool on close. } func newBucketChunkReader(ctx context.Context, block *bucketBlock) *bucketChunkReader { @@ -1977,19 +2001,9 @@ func (r *bucketChunkReader) addPreload(id uint64) error { } // preload all added chunk IDs. Must be called before the first call to Chunk is made. -func (r *bucketChunkReader) preload(samplesLimiter SampleLimiter) error { +func (r *bucketChunkReader) preload() error { g, ctx := errgroup.WithContext(r.ctx) - numChunks := uint64(0) - for _, offsets := range r.preloads { - for range offsets { - numChunks++ - } - } - if err := samplesLimiter.Check(numChunks * maxSamplesPerChunk); err != nil { - return errors.Wrap(err, "exceeded samples limit") - } - for seq, offsets := range r.preloads { sort.Slice(offsets, func(i, j int) bool { return offsets[i] < offsets[j] @@ -2013,21 +2027,29 @@ func (r *bucketChunkReader) preload(samplesLimiter SampleLimiter) error { return g.Wait() } +// loadChunks will read range [start, end] from the segment file with sequence number seq. +// This data range covers chunks starting at supplied offsets. func (r *bucketChunkReader) loadChunks(ctx context.Context, offs []uint32, seq int, start, end uint32) error { - begin := time.Now() + fetchBegin := time.Now() b, err := r.block.readChunkRange(ctx, seq, int64(start), int64(end-start)) if err != nil { return errors.Wrapf(err, "read range for %d", seq) } + locked := true r.mtx.Lock() - defer r.mtx.Unlock() + + defer func() { + if locked { + r.mtx.Unlock() + } + }() r.chunkBytes = append(r.chunkBytes, b) r.stats.chunksFetchCount++ r.stats.chunksFetched += len(offs) - r.stats.chunksFetchDurationSum += time.Since(begin) + r.stats.chunksFetchDurationSum += time.Since(fetchBegin) r.stats.chunksFetchedSizeSum += int(end - start) for _, o := range offs { @@ -2037,11 +2059,44 @@ func (r *bucketChunkReader) loadChunks(ctx context.Context, offs []uint32, seq i if n < 1 { return errors.New("reading chunk length failed") } - if len(cb) < n+int(l)+1 { - return errors.Errorf("preloaded chunk too small, expecting %d", n+int(l)+1) + + chunkRef := uint64(seq<<32) | uint64(o) + + // Chunk length is n (number of bytes used to encode chunk data), 1 for chunk encoding and l for actual chunk data. + // There is also crc32 after the chunk, but we ignore that. + chLen := n + 1 + int(l) + if len(cb) >= chLen { + r.chunks[chunkRef] = rawChunk(cb[n:chLen]) + continue } - cid := uint64(seq<<32) | uint64(o) - r.chunks[cid] = rawChunk(cb[n : n+int(l)+1]) + + // If we didn't fetch enough data for the chunk, fetch more. This can only really happen for last + // chunk in the list of fetched chunks, otherwise partitioner would merge fetch ranges together. 
+ r.mtx.Unlock() + locked = false + + fetchBegin = time.Now() + + // Read entire chunk into new buffer. + nb, err := r.block.readChunkRange(ctx, seq, int64(o), int64(chLen)) + if err != nil { + return errors.Wrapf(err, "preloaded chunk too small, expecting %d, and failed to fetch full chunk", chLen) + } + + cb = *nb + if len(cb) != chLen { + return errors.Errorf("preloaded chunk too small, expecting %d", chLen) + } + + r.mtx.Lock() + locked = true + + r.chunkBytes = append(r.chunkBytes, nb) + r.stats.chunksFetchCount++ + r.stats.chunksFetchDurationSum += time.Since(fetchBegin) + r.stats.chunksFetchedSizeSum += len(cb) + + r.chunks[chunkRef] = rawChunk(cb[n:]) } return nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go index 74ce30299e13c..abd2b3659d533 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/cache/memcached.go @@ -13,6 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/labels" + "github.com/thanos-io/thanos/pkg/cacheutil" ) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go index 5c23752d73e01..c60be901e9269 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go @@ -4,20 +4,32 @@ package store import ( + "sync" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" ) -type SampleLimiter interface { - Check(num uint64) error +type ChunksLimiter interface { + // Reserve num chunks out of the total number of chunks enforced by the limiter. + // Returns an error if the limit has been exceeded. This function must be + // goroutine safe. + Reserve(num uint64) error } +// ChunksLimiterFactory is used to create a new ChunksLimiter. The factory is useful for +// projects depending on Thanos (eg. Cortex) which have dynamic limits. +type ChunksLimiterFactory func(failedCounter prometheus.Counter) ChunksLimiter + // Limiter is a simple mechanism for checking if something has passed a certain threshold. type Limiter struct { - limit uint64 + limit uint64 + reserved atomic.Uint64 - // Counter metric which we will increase if Check() fails. + // Counter metric which we will increase if limit is exceeded. failedCounter prometheus.Counter + failedOnce sync.Once } // NewLimiter returns a new limiter with a specified limit. 0 disables the limit. @@ -25,14 +37,23 @@ func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter { return &Limiter{limit: limit, failedCounter: ctr} } -// Check checks if the passed number exceeds the limits or not. -func (l *Limiter) Check(num uint64) error { +// Reserve implements ChunksLimiter. +func (l *Limiter) Reserve(num uint64) error { if l.limit == 0 { return nil } - if num > l.limit { - l.failedCounter.Inc() - return errors.Errorf("limit %v violated (got %v)", l.limit, num) + if reserved := l.reserved.Add(num); reserved > l.limit { + // We need to protect from the counter being incremented twice due to concurrency + // while calling Reserve(). + l.failedOnce.Do(l.failedCounter.Inc) + return errors.Errorf("limit %v violated (got %v)", l.limit, reserved) } return nil } + +// NewChunksLimiterFactory makes a new ChunksLimiterFactory with a static limit. 
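A rough sketch of how the new ChunksLimiterFactory indirection is meant to be used (the static factory implementation appears a little further down); the limit value and counter name here are placeholders:

package example

import (
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/store"
)

// One factory is built at startup; each Series() request then gets its own
// ChunksLimiter, so Reserve() only accumulates within that request.
func reserveChunks(numChunks uint64) error {
	factory := store.NewChunksLimiterFactory(2_000_000) // 0 disables the limit

	dropped := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_queries_dropped_total",
		Help: "Queries rejected by the chunks limiter (placeholder metric).",
	})
	limiter := factory(dropped)

	// Called from blockSeries() for every matched series; exceeding the limit
	// fails the whole query with "exceeded chunks limit".
	return limiter.Reserve(numChunks)
}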
+func NewChunksLimiterFactory(limit uint64) ChunksLimiterFactory { + return func(failedCounter prometheus.Counter) ChunksLimiter { + return NewLimiter(limit, failedCounter) + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/local.go b/vendor/github.com/thanos-io/thanos/pkg/store/local.go index cac727d9ac7fc..e50bcf6cd0408 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/local.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/local.go @@ -17,11 +17,12 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb/fileutil" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/thanos-io/thanos/pkg/component" "github.com/thanos-io/thanos/pkg/runutil" "github.com/thanos-io/thanos/pkg/store/storepb" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) // LocalStore implements the store API against single file with stream of proto-based SeriesResponses in JSON format. @@ -86,10 +87,10 @@ func NewLocalStoreFromJSONMmappableFile( content = content[:idx+1] } - scanner := NewNoCopyScanner(content, split) + skanner := NewNoCopyScanner(content, split) resp := &storepb.SeriesResponse{} - for scanner.Scan() { - if err := jsonpb.Unmarshal(bytes.NewReader(scanner.Bytes()), resp); err != nil { + for skanner.Scan() { + if err := jsonpb.Unmarshal(bytes.NewReader(skanner.Bytes()), resp); err != nil { return nil, errors.Wrapf(err, "unmarshal storepb.SeriesResponse frame for file %s", path) } series := resp.GetSeries() @@ -116,7 +117,7 @@ func NewLocalStoreFromJSONMmappableFile( s.sortedChunks = append(s.sortedChunks, chks) } - if err := scanner.Err(); err != nil { + if err := skanner.Err(); err != nil { return nil, errors.Wrapf(err, "scanning file %s", path) } level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "info", s.info.String(), "series", len(s.series)) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go index b55535b036646..4c6f43ce3aa7f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go @@ -24,14 +24,15 @@ import ( ) // MultiTSDBStore implements the Store interface backed by multiple TSDBStore instances. +// TODO(bwplotka): Remove this and use Proxy instead. Details: https://github.com/thanos-io/thanos/issues/2864 type MultiTSDBStore struct { logger log.Logger component component.SourceStoreAPI - tsdbStores func() map[string]*TSDBStore + tsdbStores func() map[string]storepb.StoreServer } // NewMultiTSDBStore creates a new MultiTSDBStore. -func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]*TSDBStore) *MultiTSDBStore { +func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]storepb.StoreServer) *MultiTSDBStore { if logger == nil { logger = log.NewNopLogger() } @@ -89,59 +90,70 @@ type tenantSeriesSetServer struct { ctx context.Context - warnCh warnSender - recv chan *storepb.Series - cur *storepb.Series + directCh directSender + recv chan *storepb.Series + cur *storepb.Series err error tenant string } +// TODO(bwplotka): Remove tenant awareness; keep it simple with single functionality. +// Details https://github.com/thanos-io/thanos/issues/2864. 
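With tsdbStores now returning generic storepb.StoreServer values, here is a sketch of building a MultiTSDBStore from arbitrary per-tenant store servers; component.Receive is assumed to be an available SourceStoreAPI, as in the Thanos receiver:

package example

import (
	"github.com/go-kit/kit/log"

	"github.com/thanos-io/thanos/pkg/component"
	"github.com/thanos-io/thanos/pkg/store"
	"github.com/thanos-io/thanos/pkg/store/storepb"
)

// The callback now yields plain storepb.StoreServer values, so the per-tenant
// stores no longer need to be concrete *TSDBStore instances.
func newMultiStore(tenants map[string]storepb.StoreServer) *store.MultiTSDBStore {
	return store.NewMultiTSDBStore(
		log.NewNopLogger(),
		nil, // prometheus.Registerer, unused
		component.Receive,
		func() map[string]storepb.StoreServer { return tenants },
	)
}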
func newTenantSeriesSetServer( ctx context.Context, tenant string, - warnCh warnSender, + directCh directSender, ) *tenantSeriesSetServer { return &tenantSeriesSetServer{ - ctx: ctx, - tenant: tenant, - warnCh: warnCh, - recv: make(chan *storepb.Series), + ctx: ctx, + tenant: tenant, + directCh: directCh, + recv: make(chan *storepb.Series), } } -func (s *tenantSeriesSetServer) Context() context.Context { - return s.ctx -} +func (s *tenantSeriesSetServer) Context() context.Context { return s.ctx } -func (s *tenantSeriesSetServer) Series(store *TSDBStore, r *storepb.SeriesRequest) { +func (s *tenantSeriesSetServer) Series(store storepb.StoreServer, r *storepb.SeriesRequest) { var err error tracing.DoInSpan(s.ctx, "multitsdb_tenant_series", func(_ context.Context) { err = store.Series(r, s) }) - if err != nil { - if r.PartialResponseDisabled { + if r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT { s.err = errors.Wrapf(err, "get series for tenant %s", s.tenant) } else { // Consistently prefix tenant specific warnings as done in various other places. err = errors.New(prefixTenantWarning(s.tenant, err.Error())) - s.warnCh.send(storepb.NewWarnSeriesResponse(err)) + s.directCh.send(storepb.NewWarnSeriesResponse(err)) } } - close(s.recv) } func (s *tenantSeriesSetServer) Send(r *storepb.SeriesResponse) error { series := r.GetSeries() + if series == nil { + // Proxy non series responses directly to client + s.directCh.send(r) + return nil + } + + // TODO(bwplotka): Consider avoid copying / learn why it has to copied. chunks := make([]storepb.AggrChunk, len(series.Chunks)) copy(chunks, series.Chunks) - s.recv <- &storepb.Series{ + + // For series, pass it to our AggChunkSeriesSet. + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case s.recv <- &storepb.Series{ Labels: series.Labels, Chunks: chunks, + }: + return nil } - return nil } func (s *tenantSeriesSetServer) Next() (ok bool) { @@ -156,29 +168,31 @@ func (s *tenantSeriesSetServer) At() ([]storepb.Label, []storepb.AggrChunk) { return s.cur.Labels, s.cur.Chunks } -func (s *tenantSeriesSetServer) Err() error { - return s.err -} +func (s *tenantSeriesSetServer) Err() error { return s.err } // Series returns all series for a requested time range and label matcher. The // returned data may exceed the requested time bounds. The data returned may // have been read and merged from multiple underlying TSDBStore instances. func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + span, ctx := tracing.StartSpan(srv.Context(), "multitsdb_series") + defer span.Finish() + stores := s.tsdbStores() if len(stores) == 0 { return nil } - var ( - g, gctx = errgroup.WithContext(srv.Context()) - span, ctx = tracing.StartSpan(gctx, "multitsdb_series") - // Allow to buffer max 10 series response. - // Each might be quite large (multi chunk long series given by sidecar). - respSender, respRecv, closeFn = newRespCh(gctx, 10) - ) - defer span.Finish() + g, gctx := errgroup.WithContext(ctx) + + // Allow to buffer max 10 series response. + // Each might be quite large (multi chunk long series given by sidecar). + respSender, respCh := newCancelableRespChannel(gctx, 10) g.Go(func() error { + // This go routine is responsible for calling store's Series concurrently. Merged results + // are passed to respCh and sent concurrently to client (if buffer of 10 have room). + // When this go routine finishes or is canceled, respCh channel is closed. 
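The select on ctx.Done added to Send above (and to the proxy's cancelableRespSender later in this diff) is the usual "send or give up on cancel" pattern; a self-contained sketch with placeholder types:

package example

import "context"

// sender mirrors the pattern: once the request context is canceled, the
// producer drops the message instead of blocking on a receiver that is gone.
type sender struct {
	ctx context.Context
	ch  chan<- string
}

func (s sender) send(msg string) {
	select {
	case <-s.ctx.Done():
		// Receiver gave up; do not block forever.
	case s.ch <- msg:
	}
}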
+ var ( seriesSet []storepb.SeriesSet wg = &sync.WaitGroup{} @@ -186,7 +200,7 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri defer func() { wg.Wait() - closeFn() + close(respCh) }() for tenant, store := range stores { @@ -202,7 +216,6 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri defer wg.Done() ss.Series(store, r) }() - seriesSet = append(seriesSet, ss) } @@ -214,13 +227,16 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri } return mergedSet.Err() }) - - for resp := range respRecv { - if err := srv.Send(resp); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + g.Go(func() error { + // Go routine for gathering merged responses and sending them over to client. It stops when + // respCh channel is closed OR on error from client. + for resp := range respCh { + if err := srv.Send(resp); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + } } - } - + return nil + }) return g.Wait() } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go index 7b1aaff477d30..8f5180663282d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go @@ -33,8 +33,9 @@ func isDiffVarintSnappyEncodedPostings(input []byte) bool { // diffVarintSnappyEncode encodes postings into diff+varint representation, // and applies snappy compression on the result. // Returned byte slice starts with codecHeaderSnappy header. -func diffVarintSnappyEncode(p index.Postings) ([]byte, error) { - buf, err := diffVarintEncodeNoHeader(p) +// Length argument is expected number of postings, used for preallocating buffer. +func diffVarintSnappyEncode(p index.Postings, length int) ([]byte, error) { + buf, err := diffVarintEncodeNoHeader(p, length) if err != nil { return nil, err } @@ -52,9 +53,16 @@ func diffVarintSnappyEncode(p index.Postings) ([]byte, error) { // diffVarintEncodeNoHeader encodes postings into diff+varint representation. // It doesn't add any header to the output bytes. -func diffVarintEncodeNoHeader(p index.Postings) ([]byte, error) { +// Length argument is expected number of postings, used for preallocating buffer. +func diffVarintEncodeNoHeader(p index.Postings, length int) ([]byte, error) { buf := encoding.Encbuf{} + // This encoding uses around ~1 bytes per posting, but let's use + // conservative 1.25 bytes per posting to avoid extra allocations. 
+ if length > 0 { + buf.B = make([]byte, 0, 5*length/4) + } + prev := uint64(0) for p.Next() { v := p.At() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 882c32fead31b..83c7f8120fd68 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -10,7 +10,6 @@ import ( "fmt" "io" "io/ioutil" - "math" "net/http" "net/url" "path" @@ -27,6 +26,9 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb/chunkenc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "github.com/thanos-io/thanos/pkg/component" thanoshttp "github.com/thanos-io/thanos/pkg/http" "github.com/thanos-io/thanos/pkg/promclient" @@ -34,8 +36,6 @@ import ( "github.com/thanos-io/thanos/pkg/store/storepb" "github.com/thanos-io/thanos/pkg/store/storepb/prompb" "github.com/thanos-io/thanos/pkg/tracing" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) // PrometheusStore implements the store node API on top of the Prometheus remote read API. @@ -239,10 +239,7 @@ func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_Series continue } - // XOR encoding supports a max size of 2^16 - 1 samples, so we need - // to chunk all samples into groups of no more than 2^16 - 1 - // See: https://github.com/thanos-io/thanos/pull/718. - aggregatedChunks, err := p.chunkSamples(e, math.MaxUint16) + aggregatedChunks, err := p.chunkSamples(e, MaxSamplesPerChunk) if err != nil { return err } @@ -422,6 +419,7 @@ func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que } preq.Header.Add("Content-Encoding", "snappy") preq.Header.Set("Content-Type", "application/x-stream-protobuf") + preq.Header.Set("X-Prometheus-Remote-Read-Version", "0.1.0") preq.Header.Set("User-Agent", thanoshttp.ThanosUserAgent) tracing.DoInSpan(ctx, "query_prometheus_request", func(ctx context.Context) { @@ -477,7 +475,8 @@ func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labe } // encodeChunk translates the sample pairs into a chunk. -func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) { +// TODO(kakkoyun): Linter - result 0 (github.com/thanos-io/thanos/pkg/store/storepb.Chunk_Encoding) is always 0. +func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) { //nolint:unparam c := chunkenc.NewXORChunk() a, err := c.Appender() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index b5c310920edb4..67ae5a908f1b5 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -184,18 +184,23 @@ func mergeLabels(a []storepb.Label, b labels.Labels) []storepb.Label { return res } -type ctxRespSender struct { +// cancelableRespSender is a response channel that does need to be exhausted on cancel. 
+type cancelableRespSender struct { ctx context.Context ch chan<- *storepb.SeriesResponse } -func newRespCh(ctx context.Context, buffer int) (*ctxRespSender, <-chan *storepb.SeriesResponse, func()) { +func newCancelableRespChannel(ctx context.Context, buffer int) (*cancelableRespSender, chan *storepb.SeriesResponse) { respCh := make(chan *storepb.SeriesResponse, buffer) - return &ctxRespSender{ctx: ctx, ch: respCh}, respCh, func() { close(respCh) } + return &cancelableRespSender{ctx: ctx, ch: respCh}, respCh } -func (s ctxRespSender) send(r *storepb.SeriesResponse) { - s.ch <- r +// send or return on cancel. +func (s cancelableRespSender) send(r *storepb.SeriesResponse) { + select { + case <-s.ctx.Done(): + case s.ch <- r: + } } // Series returns all series for a requested time range and label matcher. Requested series are taken from other @@ -213,15 +218,17 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) } - var ( - g, gctx = errgroup.WithContext(srv.Context()) + g, gctx := errgroup.WithContext(srv.Context()) - // Allow to buffer max 10 series response. - // Each might be quite large (multi chunk long series given by sidecar). - respSender, respRecv, closeFn = newRespCh(gctx, 10) - ) + // Allow to buffer max 10 series response. + // Each might be quite large (multi chunk long series given by sidecar). + respSender, respCh := newCancelableRespChannel(gctx, 10) g.Go(func() error { + // This go routine is responsible for calling store's Series concurrently. Merged results + // are passed to respCh and sent concurrently to client (if buffer of 10 have room). + // When this go routine finishes or is canceled, respCh channel is closed. + var ( seriesSet []storepb.SeriesSet storeDebugMsgs []string @@ -239,7 +246,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe defer func() { wg.Wait() - closeFn() + close(respCh) }() for _, st := range s.stores() { @@ -294,6 +301,10 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe return nil } + // TODO(bwplotka): Currently we stream into big frames. Consider ensuring 1MB maximum. + // This however does not matter much when used with QueryAPI. Matters for federated Queries a lot. + // https://github.com/thanos-io/thanos/issues/2332 + // Series are not necessarily merged across themselves. mergedSet := storepb.MergeSeriesSets(seriesSet...) for mergedSet.Next() { var series storepb.Series @@ -302,21 +313,25 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe } return mergedSet.Err() }) - - for resp := range respRecv { - if err := srv.Send(resp); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + g.Go(func() error { + // Go routine for gathering merged responses and sending them over to client. It stops when + // respCh channel is closed OR on error from client. + for resp := range respCh { + if err := srv.Send(resp); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + } } - } - + return nil + }) if err := g.Wait(); err != nil { + // TODO(bwplotka): Replace with request logger. 
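Both Series implementations are restructured in this diff into one errgroup goroutine that produces into respCh and closes it, and another that drains it towards the client. A stripped-down sketch of that producer/consumer shape; produce and send are placeholders for the merge loop and srv.Send:

package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// run starts a producer goroutine that fills respCh and closes it, and a
// consumer goroutine that forwards each response; g.Wait() surfaces the first
// error from either side.
func run(ctx context.Context, produce func(chan<- string) error, send func(string) error) error {
	g, _ := errgroup.WithContext(ctx)
	respCh := make(chan string, 10)

	g.Go(func() error {
		defer close(respCh)
		return produce(respCh)
	})
	g.Go(func() error {
		for resp := range respCh {
			if err := send(resp); err != nil {
				return err
			}
		}
		return nil
	})
	return g.Wait()
}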
level.Error(s.logger).Log("err", err) return err } return nil } -type warnSender interface { +type directSender interface { send(*storepb.SeriesResponse) } @@ -327,7 +342,7 @@ type streamSeriesSet struct { logger log.Logger stream storepb.Store_SeriesClient - warnCh warnSender + warnCh directSender currSeries *storepb.Series recvCh chan *storepb.Series @@ -363,7 +378,7 @@ func startStreamSeriesSet( closeSeries context.CancelFunc, wg *sync.WaitGroup, stream storepb.Store_SeriesClient, - warnCh warnSender, + warnCh directSender, name string, partialResponse bool, responseTimeout time.Duration, diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index b06cb3ac3efd1..70454a39f5069 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -12,7 +12,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -23,12 +23,17 @@ import ( "github.com/thanos-io/thanos/pkg/store/storepb" ) +type TSDBReader interface { + storage.Queryable + StartTime() (int64, error) +} + // TSDBStore implements the store API against a local TSDB instance. // It attaches the provided external labels to all results. It only responds with raw data // and does not support downsampling. type TSDBStore struct { logger log.Logger - db *tsdb.DB + db TSDBReader component component.StoreAPI externalLabels labels.Labels } @@ -40,7 +45,7 @@ type ReadWriteTSDBStore struct { } // NewTSDBStore creates a new TSDBStore. -func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db *tsdb.DB, component component.StoreAPI, externalLabels labels.Labels) *TSDBStore { +func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, component component.StoreAPI, externalLabels labels.Labels) *TSDBStore { if logger == nil { logger = log.NewNopLogger() } @@ -54,15 +59,17 @@ func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db *tsdb.DB, compo // Info returns store information about the Prometheus instance. func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { + minTime, err := s.db.StartTime() + if err != nil { + return nil, errors.Wrap(err, "TSDB min Time") + } + res := &storepb.InfoResponse{ Labels: make([]storepb.Label, 0, len(s.externalLabels)), StoreType: s.component.ToProto(), - MinTime: 0, + MinTime: minTime, MaxTime: math.MaxInt64, } - if blocks := s.db.Blocks(); len(blocks) > 0 { - res.MinTime = blocks[0].Meta().MinTime - } for _, l := range s.externalLabels { res.Labels = append(res.Labels, storepb.Label{ Name: l.Name, @@ -120,12 +127,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer if !r.SkipChunks { // TODO(fabxc): An improvement over this trivial approach would be to directly // use the chunks provided by TSDB in the response. - // But since the sidecar has a similar approach, optimizing here has only - // limited benefit for now. - // NOTE: XOR encoding supports a max size of 2^16 - 1 samples, so we need - // to chunk all samples into groups of no more than 2^16 - 1 - // See: https://github.com/thanos-io/thanos/pull/1038. 
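Because NewTSDBStore now takes the TSDBReader interface instead of *tsdb.DB, existing callers keep working while tests or wrappers can pass lighter readers. A sketch of an unchanged call site, assuming component.Store and go-kit's nop logger are available as elsewhere in this vendor tree:

package example

import (
	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/tsdb"

	"github.com/thanos-io/thanos/pkg/component"
	"github.com/thanos-io/thanos/pkg/store"
)

// *tsdb.DB already satisfies TSDBReader (storage.Queryable plus StartTime),
// so a call site like this compiles unchanged after the interface swap.
func newStore(db *tsdb.DB) *store.TSDBStore {
	return store.NewTSDBStore(
		log.NewNopLogger(),
		nil, // prometheus.Registerer, unused by NewTSDBStore
		db,  // any store.TSDBReader works here now
		component.Store,
		labels.FromStrings("replica", "a"),
	)
}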
- c, err := s.encodeChunks(series.Iterator(), math.MaxUint16) + c, err := s.encodeChunks(series.Iterator(), MaxSamplesPerChunk) if err != nil { return status.Errorf(codes.Internal, "encode chunk: %s", err) } diff --git a/vendor/modules.txt b/vendor/modules.txt index f2bd6e99edfc1..cce1f29cf69f7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -158,7 +158,7 @@ github.com/coreos/go-systemd/journal github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 +# github.com/cortexproject/cortex v1.2.1-0.20200803161316-7014ff11ed70 ## explicit github.com/cortexproject/cortex/pkg/alertmanager github.com/cortexproject/cortex/pkg/alertmanager/alerts @@ -230,6 +230,7 @@ github.com/cortexproject/cortex/pkg/util/extract github.com/cortexproject/cortex/pkg/util/fakeauth github.com/cortexproject/cortex/pkg/util/flagext github.com/cortexproject/cortex/pkg/util/grpc +github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy github.com/cortexproject/cortex/pkg/util/grpc/healthcheck github.com/cortexproject/cortex/pkg/util/grpcclient github.com/cortexproject/cortex/pkg/util/limiter @@ -790,7 +791,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96 +# github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader github.com/thanos-io/thanos/pkg/block/metadata @@ -1124,7 +1125,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.29.1 +# google.golang.org/grpc v1.30.0 => google.golang.org/grpc v1.29.1 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -1437,3 +1438,4 @@ sigs.k8s.io/yaml # k8s.io/client-go => k8s.io/client-go v0.18.3 # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 +# google.golang.org/grpc => google.golang.org/grpc v1.29.1
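The newly vendored github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy package (added to modules.txt above) is what makes grpc.UseCompressor("snappy") resolvable at call time. Assuming it registers its compressor from init() the way grpc's built-in gzip codec does, importing it is sufficient; this sketch is not part of the diff:

package example

import (
	"google.golang.org/grpc"

	// Assumed to register the "snappy" compressor with gRPC's encoding
	// registry from init(), mirroring google.golang.org/grpc/encoding/gzip.
	_ "github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy"
)

// snappyCallOption returns the per-call option the grpcclient change above
// ends up producing when grpc_compression is set to "snappy".
func snappyCallOption() grpc.CallOption {
	return grpc.UseCompressor("snappy")
}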