diff --git a/go.mod b/go.mod
index 50e844e96f451..f7b977a28c64f 100644
--- a/go.mod
+++ b/go.mod
@@ -7,10 +7,9 @@ require (
 	github.com/Microsoft/go-winio v0.4.12 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/bmatcuk/doublestar v1.1.1
-	github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 // indirect
 	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
-	github.com/cortexproject/cortex v0.2.0
+	github.com/cortexproject/cortex v0.2.1-0.20191003165238-857bb8476e59
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/distribution v2.7.1+incompatible // indirect
 	github.com/docker/docker v0.0.0-20190607191414-238f8eaa31aa
@@ -40,11 +39,11 @@ require (
 	github.com/pkg/errors v0.8.1
 	github.com/prometheus/client_golang v1.1.0
 	github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4
-	github.com/prometheus/common v0.6.0
-	github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f
+	github.com/prometheus/common v0.7.0
+	github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
 	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
-	github.com/stretchr/testify v1.3.0
+	github.com/stretchr/testify v1.4.0
 	github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448
 	github.com/ugorji/go v1.1.7 // indirect
 	github.com/weaveworks/common v0.0.0-20190822150010-afb9996716e4
@@ -60,7 +59,6 @@ require (
 	google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 // indirect
 	google.golang.org/grpc v1.23.1
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
-	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	gopkg.in/fsnotify.v1 v1.4.7
 	gopkg.in/yaml.v2 v2.2.2
 	gotest.tools v2.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index e9a321b26aae0..a04ce04557a21 100644
--- a/go.sum
+++ b/go.sum
@@ -1,19 +1,12 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.28.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.35.0/go.mod h1:UE4juzxiHpKLbqrOrwVrKuaZvUtLA9CSnaYO+y53jxA=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1 h1:7gXaI3V/b4DRaK++rTqhRajcT7z8gtP0qKMZTXqlySM=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
-contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
 contrib.go.opencensus.io/exporter/ocagent v0.6.0 h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM=
 contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs=
 contrib.go.opencensus.io/exporter/stackdriver v0.6.0/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw=
-dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
-dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
-dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
-dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
 git.apache.org/thrift.git
v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.0.0-20180924222215-a9235805469b/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -42,18 +35,12 @@ github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= -github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -62,16 +49,15 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQh github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU= github.com/aws/aws-sdk-go v1.22.4 h1:Mcq67g9mZEBvBuj/x7mF9KCyw5M8/4I/cjQPkdCsq0I= github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.12 h1:2UnxgNO6Y5J1OrkXS8XNp0UatDxD1bWHiDT62RDPggI= +github.com/aws/aws-sdk-go v1.23.12/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -81,27 +67,18 @@ github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTS github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= -github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g= -github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= -github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= -github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod 
h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= -github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/cockroachdb/datadriven v0.0.0-20190531201743-edce55837238/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= @@ -109,7 +86,6 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -120,8 +96,8 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cortexproject/cortex v0.2.0 h1:L+PNIHJqCNfloQDEO95Pexwy289pFbiNKlV63hRIGQU= -github.com/cortexproject/cortex v0.2.0/go.mod h1:yus6tMOS/aHe69Hk3E5chVyHWSijx+XCUFkI8676IAI= +github.com/cortexproject/cortex v0.2.1-0.20191003165238-857bb8476e59 h1:DPfMYL5cV21JIaFtf64szezjkopANcwiQmeMZVCbStg= +github.com/cortexproject/cortex v0.2.1-0.20191003165238-857bb8476e59/go.mod h1:XLeoQLsfseLmVzRpZ6MIuoUOTAC979R7WSdBdnwe800= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -140,7 +116,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -159,22 +134,15 @@ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/gosigar v0.9.0/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= -github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= @@ -182,18 +150,14 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c h1:QwbffUs/+ptC4kTFPEN9Ej2latTq3bZJ5HO/OwPXYMs= github.com/fluent/fluent-bit-go v0.0.0-20190925192703-ea13c021720c/go.mod h1:WQX+afhrekY9rGK+WT4xvKSlzmia9gDoLYu4GGYGASQ= github.com/fluent/fluent-logger-golang v1.2.1/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsouza/fake-gcs-server v1.3.0 h1:f2mbomatUsbw8NRY7rzqiiWNn4BRM+Jredz0Pt70Usg= github.com/fsouza/fake-gcs-server v1.3.0/go.mod h1:Lq+43m2znsXfDKHnQMfdA0HpYYAEJsfizsbpk5k3TLo= -github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo 
v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= @@ -201,7 +165,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -250,7 +213,6 @@ github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gocql/gocql v0.0.0-20180113133114-697e7c57f99b/go.mod h1:GjP0ITc4WbMO5dEWwikPqq0ZWbloAO6CO6g0VAK7tTc= @@ -261,7 +223,6 @@ github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFG github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= @@ -273,7 +234,6 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= @@ -289,6 +249,8 @@ github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8l github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -298,15 +260,12 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20180605153948-8b03ce837f34/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -316,21 +275,16 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= -github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic 
v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.0.0-20190307220656-fe1ba5ce12dd/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= @@ -341,14 +295,11 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/grafana/tail v0.0.0-20190416211555-5ab7a9e9e921 h1:5yr/cfHMkL0ZrVKwXVwQ6cJL/xGb7S7ZRXLiTlzOKDc= github.com/grafana/tail v0.0.0-20190416211555-5ab7a9e9e921/go.mod h1:aS6CMYGLEIABOzX3OL8SqZ3zAZCGN7nmBnqgnyJGxyA= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.6 h1:8p0pcgLlw2iuZVsdHdPaMUXFOA+6gDixcXbHEMzSyW8= @@ -371,8 +322,6 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA= 
-github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= @@ -406,14 +355,8 @@ github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= @@ -428,7 +371,6 @@ github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62F github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -437,7 +379,6 @@ github.com/klauspost/compress v1.7.4 h1:4UqAIzZ1Ns2epCTyJ1d2xMWvxtX+FNSCYWeOFogK github.com/klauspost/compress v1.7.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -447,7 +388,6 @@ github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -456,7 +396,6 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -469,14 +408,11 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.10/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -492,7 +428,6 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -500,7 +435,6 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -508,24 +442,18 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= -github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest 
v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= @@ -534,25 +462,18 @@ github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVo github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= -github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= -github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/philhofer/fwd v0.0.0-20160129035939-98c11a7a6ec8/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -560,58 +481,50 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/alertmanager v0.15.1/go.mod h1:zdz6eCci7rHWB/8/1E/9JEfoKqCAIlxmt8EIKvHi0dI= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= +github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20180920065004-418d78d0b9a7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= -github.com/prometheus/prometheus v0.0.0-20190731144842-63ed2e28f1ac h1:nDONJ/OeNXNq7zDXYjuoPsvzf1WoT4cz9V32LsZE7Lo= -github.com/prometheus/prometheus v0.0.0-20190731144842-63ed2e28f1ac/go.mod 
h1:0nIafMIZWDF3P7oq1Xf8HKRZ0I37hKmhYEAupBEaa4A= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f h1:7C9G4yUogM8QP85pmf11vlBPuV6u2mPbqvbjPVKcNis= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= -github.com/prometheus/tsdb v0.9.1 h1:IWaAmWkYlgG7/S4iw4IpAQt5Y35QaZM6/GsZ7GsjAuk= -github.com/prometheus/tsdb v0.9.1/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rlmcpherson/s3gof3r v0.5.0/go.mod h1:s7vv7SMDPInkitQMuZzH615G7yWHdrU2r/Go7Bo71Rs= +github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 h1:W4dTblzSVIBNfDimJhh70OpZQQMwLVpwK50scXdH94w= +github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0/go.mod h1:elNqjVbwD3sCZJqKzyN7uEuwGcCpeJvv67D6BrHsDbw= +github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1 h1:+kGqA4dNN5hn7WwvKdzHl0rdN5AEkbNZd0VjRltAiZg= +github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20161028232340-1d7be4effb13/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVIQhIWvP2kNuSZ2CmnfBJFSRq+kO1pu2cc= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sandlis/weaveworks-common v0.0.0-20190822064708-8fa0a1ca9d89 h1:Dop0x86c8v6sOhFX8JPHP2TQ4NdvMgtwAnzOxmK06Ww= github.com/sandlis/weaveworks-common v0.0.0-20190822064708-8fa0a1ca9d89/go.mod h1:Gpi+45Cxn+Urtspw4hzsAtdiXjxBVdt3lipoh4aa/AU= -github.com/sasha-s/go-deadlock v0.0.0-20161201235124-341000892f3d/go.mod h1:StQn567HiB1fF2yJ44N9au7wOhrPS3iZqiDbRupzT10= github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= @@ -619,45 +532,18 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/sercand/kuberesolver v2.1.0+incompatible h1:iJ1oCzPQ/aacsbCWLfJW1hPKkHMvCEgNSA9kvWcb9MY= github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod 
h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= -github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= -github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= -github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= -github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= -github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= -github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= -github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= -github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= -github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= -github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= -github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= -github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= -github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= -github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smartystreets/assertions 
v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= -github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -675,7 +561,8 @@ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRci github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v0.0.0-20161221055906-38a6f61a768d/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -711,9 +598,6 @@ go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607/go.mod h1:tQYIqsNuGzkF9ncfEto go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= -go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= @@ -726,18 +610,13 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -756,13 +635,10 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -777,20 +653,16 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180925112736-b09afc3d579e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -798,6 +670,8 @@ golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -816,7 +690,6 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180924175601-e93be7f42f9f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -837,10 +710,6 @@ golang.org/x/tools v0.0.0-20190925134113-a044388aa56f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api 
v0.0.0-20180921000521-920bb1beccf7/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -848,7 +717,6 @@ google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ= google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -858,9 +726,6 @@ google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180924164928-221a8d4f7494/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/genproto v0.0.0-20190110221437-6909d8a4a91b/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -871,10 +736,7 @@ google.golang.org/genproto v0.0.0-20190916214212-f660b8655731 h1:Phvl0+G5t5k/EUF google.golang.org/genproto v0.0.0-20190916214212-f660b8655731/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -892,7 +754,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/fsnotify/fsnotify.v1 v1.3.1/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -908,18 +769,15 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= @@ -931,13 +789,11 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6 h1:s9IxTKe9GwDH0S/WaX62nFYr0or32DsTWex9AileL7U= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod 
h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a h1:uy5HAgt4Ha5rEMbhZA+aM1j2cq5LmR6LQ71EYC2sVH4= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= @@ -945,5 +801,3 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 6b9baaea1c2a4..748689e43caa3 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -232,14 +232,17 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%d) exceeded while adding %d lines", int(limiter.Limit()), lineCount) } - replicationSets, err := d.ring.BatchGet(keys, ring.Write) - if err != nil { - return nil, err - } + const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck + var descs [maxExpectedReplicationSet]ring.IngesterDesc samplesByIngester := map[string][]*streamTracker{} ingesterDescs := map[string]ring.IngesterDesc{} - for i, replicationSet := range replicationSets { + for i, key := range keys { + replicationSet, err := d.ring.Get(key, ring.Write, descs[:0]) + if err != nil { + return nil, err + } + streams[i].minSuccess = len(replicationSet.Ingesters) - replicationSet.MaxErrors streams[i].maxFailures = replicationSet.MaxErrors for _, ingester := range replicationSet.Ingesters { diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 356a0db0f207f..cea97b4efa31e 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -133,9 +133,10 @@ type mockRing struct { replicationFactor uint32 } -func (r mockRing) Get(key uint32, op ring.Operation) (ring.ReplicationSet, error) { +func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc) (ring.ReplicationSet, error) { result := ring.ReplicationSet{ MaxErrors: 1, + Ingesters: buf[:0], } for i := uint32(0); i < r.replicationFactor; i++ { n := (key + i) % uint32(len(r.ingesters)) @@ -144,18 +145,6 @@ func (r mockRing) Get(key uint32, op ring.Operation) (ring.ReplicationSet, error return result, nil } -func (r mockRing) BatchGet(keys []uint32, op ring.Operation) ([]ring.ReplicationSet, error) { - result := []ring.ReplicationSet{} - for i := 0; i < len(keys); i++ { - rs, err := r.Get(keys[i], op) - if err != nil { - return nil, err - } - result = append(result, rs) - } - return result, nil -} - func (r mockRing) GetAll() (ring.ReplicationSet, error) { return ring.ReplicationSet{ Ingesters: r.ingesters, @@ -166,3 +155,7 @@ func (r mockRing) GetAll() (ring.ReplicationSet, error) { 
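// Illustrative aside (sketch, not part of the patch): the distributor hunk above drops
// ring.BatchGet in favour of one ring.Get call per stream key, passing descs[:0] so a
// single fixed-size array is reused for every lookup instead of allocating a slice of
// replication sets up front. The self-contained Go sketch below shows that buffer-reuse
// pattern; IngesterDesc, ReplicationSet and the ring interface here are simplified
// stand-ins for the cortex ring types, not the real API.
package main

import "fmt"

type IngesterDesc struct{ Addr string }

type ReplicationSet struct {
	Ingesters []IngesterDesc
	MaxErrors int
}

type ring interface {
	// Get appends the replicas for key to buf, letting callers reuse one allocation.
	Get(key uint32, buf []IngesterDesc) (ReplicationSet, error)
}

// staticRing hands out the same replicas for every key; enough for the sketch.
type staticRing struct{ addrs []string }

func (r staticRing) Get(key uint32, buf []IngesterDesc) (ReplicationSet, error) {
	out := buf // append into the caller-provided buffer, no per-key allocation
	for _, a := range r.addrs {
		out = append(out, IngesterDesc{Addr: a})
	}
	return ReplicationSet{Ingesters: out, MaxErrors: 1}, nil
}

func main() {
	const maxExpectedReplicationSet = 5 // replication factor 3, plus one inactive, plus one spare
	var descs [maxExpectedReplicationSet]IngesterDesc

	r := staticRing{addrs: []string{"ingester-0", "ingester-1", "ingester-2"}}
	for _, key := range []uint32{11, 22, 33} {
		rs, err := r.Get(key, descs[:0]) // descs is reused on every iteration
		if err != nil {
			panic(err)
		}
		minSuccess := len(rs.Ingesters) - rs.MaxErrors
		fmt.Printf("key=%d replicas=%d minSuccess=%d maxFailures=%d\n",
			key, len(rs.Ingesters), minSuccess, rs.MaxErrors)
	}
}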
func (r mockRing) ReplicationFactor() int { return int(r.replicationFactor) } + +func (r mockRing) IngesterCount() int { + return len(r.ingesters) +} diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index cf3435bfb4fb1..2e8030a74cf9b 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -196,15 +196,20 @@ func (q *Querier) Label(ctx context.Context, req *logproto.LabelRequest) (*logpr return nil, err } + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + from, through := model.TimeFromUnixNano(req.Start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano()) var storeValues []string if req.Values { - storeValues, err = q.store.LabelValuesForMetricName(ctx, from, through, "logs", req.Name) + storeValues, err = q.store.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name) if err != nil { return nil, err } } else { - storeValues, err = q.store.LabelNamesForMetricName(ctx, from, through, "logs") + storeValues, err = q.store.LabelNamesForMetricName(ctx, userID, from, through, "logs") if err != nil { return nil, err } diff --git a/pkg/querier/querier_mock_test.go b/pkg/querier/querier_mock_test.go index c20cc1723358f..8bd74c8d927c8 100644 --- a/pkg/querier/querier_mock_test.go +++ b/pkg/querier/querier_mock_test.go @@ -176,13 +176,13 @@ func (s *storeMock) LazyQuery(ctx context.Context, req logql.SelectParams) (iter return args.Get(0).(iter.EntryIterator), args.Error(1) } -func (s *storeMock) Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { - args := s.Called(ctx, from, through, matchers) +func (s *storeMock) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { + args := s.Called(ctx, userID, from, through, matchers) return args.Get(0).([]chunk.Chunk), args.Error(1) } -func (s *storeMock) GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { - args := s.Called(ctx, from, through, matchers) +func (s *storeMock) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { + args := s.Called(ctx, userID, from, through, matchers) return args.Get(0).([][]chunk.Chunk), args.Get(0).([]*chunk.Fetcher), args.Error(2) } @@ -194,13 +194,13 @@ func (s *storeMock) PutOne(ctx context.Context, from, through model.Time, chunk return errors.New("storeMock.PutOne() has not been mocked") } -func (s *storeMock) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { - args := s.Called(ctx, from, through, metricName, labelName) +func (s *storeMock) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { + args := s.Called(ctx, userID, from, through, metricName, labelName) return args.Get(0).([]string), args.Error(1) } -func (s *storeMock) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { - args := s.Called(ctx, from, through, metricName) +func (s *storeMock) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { + args := s.Called(ctx, userID, from, through, metricName) return args.Get(0).([]string), args.Error(1) } @@ -229,7 +229,7 @@ func (r *readRingMock) Describe(ch 
chan<- *prometheus.Desc) { func (r *readRingMock) Collect(ch chan<- prometheus.Metric) { } -func (r *readRingMock) Get(key uint32, op ring.Operation) (ring.ReplicationSet, error) { +func (r *readRingMock) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc) (ring.ReplicationSet, error) { return r.replicationSet, nil } @@ -245,6 +245,10 @@ func (r *readRingMock) ReplicationFactor() int { return 1 } +func (r *readRingMock) IngesterCount() int { + return len(r.replicationSet.Ingesters) +} + func mockReadRingWithOneActiveIngester() *readRingMock { return newReadRingMock([]ring.IngesterDesc{ {Addr: "test", Timestamp: time.Now().UnixNano(), State: ring.ACTIVE, Tokens: []uint32{1, 2, 3}}, diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 75b7d93f152ac..2a197828b95b1 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -42,7 +42,7 @@ func TestQuerier_Label_QueryTimeoutConfigFlag(t *testing.T) { ingesterClient.On("Label", mock.Anything, &request, mock.Anything).Return(mockLabelResponse([]string{}), nil) store := newStoreMock() - store.On("LabelValuesForMetricName", mock.Anything, model.TimeFromUnixNano(startTime.UnixNano()), model.TimeFromUnixNano(endTime.UnixNano()), "logs", "test").Return([]string{"foo", "bar"}, nil) + store.On("LabelValuesForMetricName", mock.Anything, "test", model.TimeFromUnixNano(startTime.UnixNano()), model.TimeFromUnixNano(endTime.UnixNano()), "logs", "test").Return([]string{"foo", "bar"}, nil) limits, err := validation.NewOverrides(defaultLimitsTestConfig()) require.NoError(t, err) diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 1c3c3bc6ea1cf..c14ee623acba0 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/storage" @@ -70,9 +71,14 @@ func (s *store) LazyQuery(ctx context.Context, req logql.SelectParams) (iter.Ent return nil, err } + userID, err := user.ExtractOrgID(ctx) + if err != nil { + return nil, err + } + matchers = append(matchers, nameLabelMatcher) from, through := util.RoundToMilliseconds(req.Start, req.End) - chks, fetchers, err := s.GetChunkRefs(ctx, from, through, matchers...) + chks, fetchers, err := s.GetChunkRefs(ctx, userID, from, through, matchers...) 
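// Illustrative aside (sketch, not part of the patch): the querier and store hunks above
// make the tenant explicit on the query path. The org ID is read from the request context
// with user.ExtractOrgID and passed as the new userID argument of the Cortex chunk-store
// methods; the store test further down injects it with user.InjectOrgID. A minimal round
// trip, assuming the vendored github.com/weaveworks/common/user package:
package main

import (
	"context"
	"fmt"

	"github.com/weaveworks/common/user"
)

func main() {
	// The HTTP middleware does this in the real code path.
	ctx := user.InjectOrgID(context.Background(), "test-user")

	// Query-path code extracts the tenant before calling the store.
	userID, err := user.ExtractOrgID(ctx)
	if err != nil {
		panic(err) // a bare context.Background() would fail here
	}
	fmt.Println("querying chunks for tenant:", userID)
}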
if err != nil { return nil, err } diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 736a7ee119f1d..799c494007e99 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -334,7 +334,8 @@ func Test_store_LazyQuery(t *testing.T) { MaxChunkBatchSize: 10, }, } - it, err := s.LazyQuery(context.Background(), logql.SelectParams{QueryRequest: tt.req}) + ctx = user.InjectOrgID(context.Background(), "test-user") + it, err := s.LazyQuery(ctx, logql.SelectParams{QueryRequest: tt.req}) if err != nil { t.Errorf("store.LazyQuery() error = %v", err) return diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 9e40775b0b19a..9107438b8f145 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -115,14 +115,14 @@ func (m *mockChunkStore) Put(ctx context.Context, chunks []chunk.Chunk) error { func (m *mockChunkStore) PutOne(ctx context.Context, from, through model.Time, chunk chunk.Chunk) error { return nil } -func (m *mockChunkStore) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { +func (m *mockChunkStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { return nil, nil } -func (m *mockChunkStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { +func (m *mockChunkStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { return nil, nil } func (m *mockChunkStore) Stop() {} -func (m *mockChunkStore) Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { +func (m *mockChunkStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) { return nil, nil } @@ -143,7 +143,7 @@ func (m *mockChunkStore) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([ return res, nil } -func (m *mockChunkStore) GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { +func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*chunk.Fetcher, error) { refs := make([]chunk.Chunk, 0, len(m.chunks)) // transform real chunks into ref chunks. for _, c := range m.chunks { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index a397b0d044c73..2f20d31028de8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -14,12 +14,12 @@ import ( // struct and override the specific methods. 
For example, to override only // the MaxRetries method: // -// type retryer struct { -// client.DefaultRetryer -// } +// type retryer struct { +// client.DefaultRetryer +// } // -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() int { return 100 } +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -33,9 +33,9 @@ func (d DefaultRetryer) MaxRetries() int { // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { + var minTime int64 = 30 + isThrottle := r.IsErrorThrottle() + if isThrottle { if delay, ok := getRetryDelay(r); ok { return delay } @@ -44,13 +44,13 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { } retryCount := r.RetryCount - if throttle && retryCount > 8 { + if isThrottle && retryCount > 8 { retryCount = 8 } else if retryCount > 13 { retryCount = 13 } - delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Int63n(minTime) + minTime) return time.Duration(delay) * time.Millisecond } @@ -65,21 +65,8 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - switch r.HTTPResponse.StatusCode { - case 429: - case 502: - case 503: - case 504: - default: - return r.IsErrorThrottle() - } - return true + return r.IsErrorRetryable() || r.IsErrorThrottle() } // This will look in the Retry-After header, RFC 7231, for how long diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 10634d173d317..fd1e240f6eb52 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,7 +20,7 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), @@ -251,7 +251,7 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetry configuration to be shared by multiple +// // Create Session with MaxRetries configuration to be shared by multiple // // service clients. // sess := session.Must(session.NewSession(aws.NewConfig(). 
// WithMaxRetries(3), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index f8853d78af2b1..0c60e612ea567 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) { Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } } - // Catch all other request errors. + // Catch all request errors, and let the default retrier determine + // if the error is retryable. r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable // Override the error with a context canceled error, if that was canceled. ctx := r.Context() @@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH // AfterRetryHandler performs final checks to determine if the request should // be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) - if sleepFn := r.Config.SleepDelay; sleepFn != nil { - // Support SleepDelay for backwards compatibility and testing - sleepFn(r.RetryDelay) - } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { - r.Error = awserr.New(request.CanceledErrorCode, - "request context canceled", err) - r.Retryable = aws.Bool(false) - return - } + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } - r.RetryCount++ - r.Error = nil - } -}} + r.RetryCount++ + r.Error = nil + } + }} // ValidateEndpointHandler is a request handler to validate a request had the // appropriate Region and Endpoint set. 
Will set r.Error if the endpoint or diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index c2b2c5d65c392..1a7af53a4da17 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin return p } -// NewCredentialsClient returns a Credentials wrapper for retrieving credentials -// from an arbitrary endpoint concurrently. The client will request the +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index 20510d9aec8e8..b20b63394847c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -76,12 +76,15 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { // uses unix time in nanoseconds to uniquely identify sessions. sessionName = strconv.FormatInt(now().UnixNano(), 10) } - resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{ + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), }) - if err != nil { + // InvalidIdentityToken error is a temporary error that can occur + // when assuming an Role with a JWT web identity token. 
+ req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) + if err := req.Send(); err != nil { return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 2c8d5f56d0e23..d126764ce4e3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -152,18 +152,19 @@ type EC2IAMInfo struct { // An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index e5031aadc6fb5..f227210cd9100 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -320,6 +320,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, @@ -339,6 +340,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -346,8 +348,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", @@ -581,6 +586,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1588,6 +1594,7 @@ var awsPartition = partition{ "firehose": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1631,6 +1638,7 @@ 
var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -1751,9 +1759,33 @@ var awsPartition = partition{ "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "health": service{ @@ -1962,11 +1994,14 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2054,6 +2089,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2124,6 +2160,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2652,6 +2689,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2659,8 +2697,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", @@ -3134,6 +3175,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3287,6 +3329,7 @@ var awsPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3507,9 +3550,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3932,7 +3977,8 @@ var awscnPartition = partition{ }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + 
"cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "kinesis": service{ @@ -4515,6 +4561,7 @@ var awsusgovPartition = partition{ "health": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4613,6 +4660,23 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index e7c9b2b61af7a..8e332cce6a6ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io" - "net" "net/http" "net/url" "reflect" @@ -65,6 +64,15 @@ type Request struct { LastSignedAt time.Time DisableFollowRedirects bool + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + // A value greater than 0 instructs the request to be signed as Presigned URL // You should not set this field directly. Instead use Request's // Presign or PresignRequest methods. @@ -498,21 +506,17 @@ func (r *Request) Send() error { if err := r.sendRequest(); err == nil { return nil - } else if !shouldRetryError(r.Error) { + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err return err - } else { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - - if r.Error != nil || !aws.BoolValue(r.Retryable) { - return r.Error - } - - if err := r.prepareRetry(); err != nil { - r.Error = err - return err - } - continue } } } @@ -596,51 +600,6 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -type temporary interface { - Temporary() bool -} - -func shouldRetryError(origErr error) bool { - switch err := origErr.(type) { - case awserr.Error: - if err.Code() == CanceledErrorCode { - return false - } - return shouldRetryError(err.OrigErr()) - case *url.Error: - if strings.Contains(err.Error(), "connection refused") { - // Refused connections should be retried as the service may not yet - // be running on the port. Go TCP dial considers refused - // connections as not temporary. 
- return true - } - // *url.Error only implements Temporary after golang 1.6 but since - // url.Error only wraps the error: - return shouldRetryError(err.Err) - case temporary: - if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { - return true - } - // If the error is temporary, we want to allow continuation of the - // retry process - return err.Temporary() || isErrConnectionReset(origErr) - case nil: - // `awserr.Error.OrigErr()` can be nil, meaning there was an error but - // because we don't know the cause, it is marked as retryable. See - // TestRequest4xxUnretryable for an example. - return true - default: - switch err.Error() { - case "net/http: request canceled", - "net/http: request canceled while waiting for connection": - // known 1.5 error case when an http request is cancelled - return false - } - // here we don't know the error; so we allow a retry. - return true - } -} - // SanitizeHostForHeader removes default port from host and updates request.Host func SanitizeHostForHeader(r *http.Request) { host := getHost(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index d0aa54c6d1061..c4e659b01d129 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -1,23 +1,41 @@ package request import ( + "net" + "net/url" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" ) -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the client.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle +// methods to determine if the request is retried. type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. MaxRetries() int } -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { cfg.Retryer = retryer return cfg @@ -108,32 +126,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool { // IsErrorRetryable returns whether the error is retryable, based on its Code. // Returns false if error is nil. 
func IsErrorRetryable(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry := shouldRetryError(origErr) + if err.Code() == "RequestError" && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false } + // here we don't know the error; so we allow a retry. + return true } - return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. // Returns false if error is nil. func IsErrorThrottle(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeThrottle(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) } return false } -// IsErrorExpiredCreds returns whether the error code is a credential expiry error. -// Returns false if error is nil. +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. func IsErrorExpiredCreds(err error) bool { - if err != nil { - if aerr, ok := err.(awserr.Error); ok { - return isCodeExpiredCreds(aerr.Code()) - } + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) } return false } @@ -143,17 +219,44 @@ func IsErrorExpiredCreds(err error) bool { // // Alias for the utility function IsErrorRetryable func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + return IsErrorRetryable(r.Error) } -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
// // Alias for the utility function IsErrorThrottle func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case 429, 502, 503, 504: + return true + } + } + return IsErrorThrottle(r.Error) } +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + // IsErrorExpired returns whether the error code is a credential expiry error. // Returns false if the request has no Error set. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 3a998d5bd626a..60a6f9ce2a4dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -99,10 +99,10 @@ type envConfig struct { CustomCABundle string csmEnabled string - CSMEnabled bool + CSMEnabled *bool CSMPort string - CSMClientID string CSMHost string + CSMClientID string // Enables endpoint discovery via environment variables. // @@ -230,7 +230,11 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) - cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + + if len(cfg.csmEnabled) != 0 { + v, _ := strconv.ParseBool(cfg.csmEnabled) + cfg.CSMEnabled = &v + } regionKeys := regionEnvKeys profileKeys := profileEnvKeys diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 1b4fcdb10e196..fa1362ff7406d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -104,9 +104,13 @@ func New(cfgs ...*aws.Config) *Session { } s := deprecatedNewSession(cfgs...) 
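// Illustrative aside (sketch, not part of the patch): in the env_config.go hunk above
// CSMEnabled changes from bool to *bool, so "unset", "true" and "false" become three
// distinct states and AWS_CSM_ENABLED=false is no longer treated as enabled merely
// because the variable is present; the session.go and shared_config.go hunks that follow
// fall back to the aws_csm shared-config profile when the variable is unset. A minimal
// stdlib-only sketch of that tri-state parse (the helper name is illustrative):
package main

import (
	"fmt"
	"os"
	"strconv"
)

// parseBoolPtr returns nil for an empty (unset) value, otherwise a pointer to the
// parsed bool; a value that fails to parse is treated as false, as in the SDK.
func parseBoolPtr(raw string) *bool {
	if len(raw) == 0 {
		return nil
	}
	v, _ := strconv.ParseBool(raw)
	return &v
}

func main() {
	enabled := parseBoolPtr(os.Getenv("AWS_CSM_ENABLED"))
	switch {
	case enabled == nil:
		fmt.Println("CSM not set via environment; defer to the aws_csm shared-config profile")
	case *enabled:
		fmt.Println("CSM explicitly enabled")
	default:
		fmt.Println("CSM explicitly disabled")
	}
}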
- if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { err = fmt.Errorf("failed to enable CSM, %v", err) s.Config.Logger.Log("ERROR:", err.Error()) @@ -347,15 +351,12 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { return s } -func enableCSM(handlers *request.Handlers, - clientID, host, port string, - logger aws.Logger, -) error { +func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error { if logger != nil { logger.Log("Enabling CSM") } - r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port)) + r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port)) if err != nil { return err } @@ -395,7 +396,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, // Load additional config from file(s) sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig) if err != nil { - if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) { + // Special case where the user has not explicitly specified an AWS_PROFILE, + // or session.Options.profile, shared config is not enabled, and the + // environment has credentials, allow the shared config file to fail to + // load since the user has already provided credentials, and nothing else + // is required to be read file. Github(aws/aws-sdk-go#2455) + } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok { return nil, err } } @@ -410,9 +417,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, } initHandlers(s) - if envCfg.CSMEnabled { - err := enableCSM(&s.Handlers, envCfg.CSMClientID, - envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + + if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil { + if l := s.Config.Logger; l != nil { + l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err)) + } + } else if csmCfg.Enabled { + err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { return nil, err } @@ -428,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, return s, nil } +type csmConfig struct { + Enabled bool + Host string + Port string + ClientID string +} + +var csmProfileName = "aws_csm" + +func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) { + if envCfg.CSMEnabled != nil { + if *envCfg.CSMEnabled { + return csmConfig{ + Enabled: true, + ClientID: envCfg.CSMClientID, + Host: envCfg.CSMHost, + Port: envCfg.CSMPort, + }, nil + } + return csmConfig{}, nil + } + + sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false) + if err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); !ok { + return csmConfig{}, err + } + } + if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true { + return csmConfig{ + Enabled: true, + ClientID: sharedCfg.CSMClientID, + Host: sharedCfg.CSMHost, + Port: sharedCfg.CSMPort, + }, nil + } + + return csmConfig{}, nil +} + func loadCustomCABundle(s *Session, bundle io.Reader) error { var t *http.Transport switch v := s.Config.HTTPClient.Transport.(type) { diff --git 
a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 5170b4982e068..d91ac93a54443 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -22,6 +22,12 @@ const ( mfaSerialKey = `mfa_serial` // optional roleSessionNameKey = `role_session_name` // optional + // CSM options + csmEnabledKey = `csm_enabled` + csmHostKey = `csm_host` + csmPortKey = `csm_port` + csmClientIDKey = `csm_client_id` + // Additional Config fields regionKey = `region` @@ -76,6 +82,12 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + + // CSM Options + CSMEnabled *bool + CSMHost string + CSMPort string + CSMClientID string } type sharedConfigFile struct { @@ -251,10 +263,13 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e } // Endpoint discovery - if section.Has(enableEndpointDiscoveryKey) { - v := section.Bool(enableEndpointDiscoveryKey) - cfg.EnableEndpointDiscovery = &v - } + updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey) + + // CSM options + updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey) + updateString(&cfg.CSMHost, section, csmHostKey) + updateString(&cfg.CSMPort, section, csmPortKey) + updateString(&cfg.CSMClientID, section, csmClientIDKey) return nil } @@ -348,6 +363,16 @@ func updateString(dst *string, section ini.Section, key string) { *dst = section.String(key) } +// updateBoolPtr will only update the dst with the value in the section key, +// key is present in the section. +func updateBoolPtr(dst **bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = new(bool) + **dst = section.Bool(key) +} + // SharedConfigLoadError is an error for the shared config file failed to load. type SharedConfigLoadError struct { Filename string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b0009b3bf88bf..742d63a3232de 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.22.4" +const SDKVersion = "1.23.12" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go new file mode 100644 index 0000000000000..44898eed0fdd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go @@ -0,0 +1,15 @@ +// +build go1.10 + +package sdkmath + +import "math" + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + return math.Round(x) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go new file mode 100644 index 0000000000000..810ec7f08b004 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go @@ -0,0 +1,56 @@ +// +build !go1.10 + +package sdkmath + +import "math" + +// Copied from the Go standard library's (Go 1.12) math/floor.go for use in +// Go version prior to Go 1.10. 
+const ( + uvone = 0x3FF0000000000000 + mask = 0x7FF + shift = 64 - 11 - 1 + bias = 1023 + signMask = 1 << 63 + fracMask = 1<<shift - 1 +) + +// Round returns the nearest integer, rounding half away from zero. +// +// Special cases are: +// Round(±0) = ±0 +// Round(±Inf) = ±Inf +// Round(NaN) = NaN +func Round(x float64) float64 { + // Round is a faster implementation of: + // + // func Round(x float64) float64 { + // t := Trunc(x) + // if Abs(x-t) >= 0.5 { + // return t + Copysign(1, x) + // } + // return t + // } + bits := math.Float64bits(x) + e := uint(bits>>shift) & mask + if e < bias { + // Round abs(x) < 1 including denormals. + bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index de021367da24f..74e361e070d5b 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) { } func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + if len(headers) == 0 { + return nil + } switch r.Interface().(type) { case map[string]*string: // we only support string map value types out := map[string]*string{} @@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err out[k[len(prefix):]] = &v[0] } } - r.Set(reflect.ValueOf(out)) + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + } return nil } func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { - isJSONValue := tag.Get("type") == "jsonvalue" - if isJSONValue { + switch tag.Get("type") { + case "jsonvalue": if len(header) == 0 { return nil } - } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } } switch v.Interface().(type) { @@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro if err != nil { return err } - v.Set(reflect.ValueOf(&b)) + v.Set(reflect.ValueOf(b)) case *bool: b, err := strconv.ParseBool(header) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index cf569645dc222..07a6187ea624f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -39,7 +39,7 @@ func Build(r *request.Request) { r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, "failed to encode rest XML request", err), - r.HTTPResponse.StatusCode, + 0, r.RequestID, ) return diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index b7ed6c6f8100e..05d4ff5192583 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -1,8 +1,11 @@ package protocol import ( + "math" "strconv" "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" ) // Names of time formats supported by the SDK @@ -13,12 +16,19 @@ const ( ) // Time formats supported by the SDK +//
Output time is intended to not contain decimals const ( // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + // This format is used for output time without seconds precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z - ISO8601TimeFormat = "2006-01-02T15:04:05Z" + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + + // This format is used for output time without seconds precision + ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string { switch name { case RFC822TimeFormatName: - return t.Format(RFC822TimeFormat) + return t.Format(RFC822OutputTimeFormat) case ISO8601TimeFormatName: - return t.Format(ISO8601TimeFormat) + return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: return strconv.FormatInt(t.Unix(), 10) default: @@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) { return time.Parse(ISO8601TimeFormat, value) case UnixTimeFormatName: v, err := strconv.ParseFloat(value, 64) + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123 if err != nil { return time.Time{}, err } - return time.Unix(int64(v), 0), nil + return time.Unix(int64(v), int64(dec*(1e9))), nil default: panic("unknown timestamp format name, " + formatName) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go index d27d68b66a185..83cc7565b92f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -815,6 +815,12 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *Describe Name: opDescribeScheduledActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -881,6 +887,56 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsWithContext(ctx aws.Con return out, req.Send() } +// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. +// pageNum := 0 +// err := client.DescribeScheduledActionsPages(params, +// func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ApplicationAutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool) error { + return c.DescribeScheduledActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScheduledActionsPagesWithContext same as DescribeScheduledActionsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApplicationAutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScheduledActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScheduledActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) + } + return p.Err() +} + const opPutScalingPolicy = "PutScalingPolicy" // PutScalingPolicyRequest generates a "aws/request.Request" representing the @@ -3315,6 +3371,26 @@ type RegisterScalableTargetInput struct { // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // An embedded object that contains attributes and attribute values that are + // used to suspend and resume automatic scaling. Setting the value of an attribute + // to true suspends the specified scaling activities. Setting it to false (default) + // resumes the specified scaling activities. + // + // Suspension Outcomes + // + // * For DynamicScalingInSuspended, while a suspension is in effect, all + // scale-in activities that are triggered by a scaling policy are suspended. + // + // * For DynamicScalingOutSuspended, while a suspension is in effect, all + // scale-out activities that are triggered by a scaling policy are suspended. + // + // * For ScheduledScalingSuspended, while a suspension is in effect, all + // scaling activities that involve scheduled actions are suspended. + // + // For more information, see Suspend and Resume Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) + // in the Application Auto Scaling User Guide. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3388,6 +3464,12 @@ func (s *RegisterScalableTargetInput) SetServiceNamespace(v string) *RegisterSca return s } +// SetSuspendedState sets the SuspendedState field's value. +func (s *RegisterScalableTargetInput) SetSuspendedState(v *SuspendedState) *RegisterScalableTargetInput { + s.SuspendedState = v + return s +} + type RegisterScalableTargetOutput struct { _ struct{} `type:"structure"` } @@ -3508,6 +3590,10 @@ type ScalableTarget struct { // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // Specifies whether the scaling activities for a scalable target are in a suspended + // state. + SuspendedState *SuspendedState `type:"structure"` } // String returns the string representation @@ -3562,6 +3648,12 @@ func (s *ScalableTarget) SetServiceNamespace(v string) *ScalableTarget { return s } +// SetSuspendedState sets the SuspendedState field's value. +func (s *ScalableTarget) SetSuspendedState(v *SuspendedState) *ScalableTarget { + s.SuspendedState = v + return s +} + // Represents the minimum and maximum capacity for a scheduled action. 
type ScalableTargetAction struct { _ struct{} `type:"structure"` @@ -4390,6 +4482,55 @@ func (s *StepScalingPolicyConfiguration) SetStepAdjustments(v []*StepAdjustment) return s } +// Specifies whether the scaling activities for a scalable target are in a suspended +// state. +type SuspendedState struct { + _ struct{} `type:"structure"` + + // Whether scale in by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to remove capacity when a scaling policy is triggered. The default is false. + DynamicScalingInSuspended *bool `type:"boolean"` + + // Whether scale out by a target tracking scaling policy or a step scaling policy + // is suspended. Set the value to true if you don't want Application Auto Scaling + // to add capacity when a scaling policy is triggered. The default is false. + DynamicScalingOutSuspended *bool `type:"boolean"` + + // Whether scheduled scaling is suspended. Set the value to true if you don't + // want Application Auto Scaling to add or remove capacity by initiating scheduled + // actions. The default is false. + ScheduledScalingSuspended *bool `type:"boolean"` +} + +// String returns the string representation +func (s SuspendedState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendedState) GoString() string { + return s.String() +} + +// SetDynamicScalingInSuspended sets the DynamicScalingInSuspended field's value. +func (s *SuspendedState) SetDynamicScalingInSuspended(v bool) *SuspendedState { + s.DynamicScalingInSuspended = &v + return s +} + +// SetDynamicScalingOutSuspended sets the DynamicScalingOutSuspended field's value. +func (s *SuspendedState) SetDynamicScalingOutSuspended(v bool) *SuspendedState { + s.DynamicScalingOutSuspended = &v + return s +} + +// SetScheduledScalingSuspended sets the ScheduledScalingSuspended field's value. +func (s *SuspendedState) SetScheduledScalingSuspended(v bool) *SuspendedState { + s.ScheduledScalingSuspended = &v + return s +} + // Represents a target tracking scaling policy configuration to use with Application // Auto Scaling. 
type TargetTrackingScalingPolicyConfiguration struct { diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go index 561d28cee2d70..a5165752810e1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go @@ -97,6 +97,9 @@ type ApplicationAutoScalingAPI interface { DescribeScheduledActionsWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, ...request.Option) (*applicationautoscaling.DescribeScheduledActionsOutput, error) DescribeScheduledActionsRequest(*applicationautoscaling.DescribeScheduledActionsInput) (*request.Request, *applicationautoscaling.DescribeScheduledActionsOutput) + DescribeScheduledActionsPages(*applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool) error + DescribeScheduledActionsPagesWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool, ...request.Option) error + PutScalingPolicy(*applicationautoscaling.PutScalingPolicyInput) (*applicationautoscaling.PutScalingPolicyOutput, error) PutScalingPolicyWithContext(aws.Context, *applicationautoscaling.PutScalingPolicyInput, ...request.Option) (*applicationautoscaling.PutScalingPolicyOutput, error) PutScalingPolicyRequest(*applicationautoscaling.PutScalingPolicyInput) (*request.Request, *applicationautoscaling.PutScalingPolicyOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go index 53002308c4d48..fbaa9db469362 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go @@ -24,7 +24,7 @@ // // API Summary // -// The Application Auto Scaling service API includes two key sets of actions: +// The Application Auto Scaling service API includes three key sets of actions: // // * Register and manage scalable targets - Register AWS or custom resources // as scalable targets (a resource that Application Auto Scaling can scale), @@ -36,6 +36,12 @@ // one-time or recurring scaling actions, and retrieve your recent scaling // activity history. // +// * Suspend and resume scaling - Temporarily suspend and later resume automatic +// scaling by calling the RegisterScalableTarget action for any Application +// Auto Scaling scalable target. You can suspend and resume, individually +// or in combination, scale-out activities triggered by a scaling policy, +// scale-in activities triggered by a scaling policy, and scheduled scaling. +// // To learn more about Application Auto Scaling, including information about // granting IAM users required permissions for Application Auto Scaling actions, // see the Application Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). 
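Note on the Application Auto Scaling changes above: this update adds the DescribeScheduledActionsPages paginator and the SuspendedState field on RegisterScalableTarget. The following is a minimal, untested sketch of how a caller might exercise both against this vendored SDK version; the ECS namespace, scalable dimension, and resource ID strings are illustrative placeholders and are not taken from this change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := applicationautoscaling.New(sess)

	// Suspend scheduled scaling for an already registered scalable target by
	// re-registering it with the new SuspendedState field. The resource ID and
	// dimension below are hypothetical ECS values.
	_, err := svc.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
		ServiceNamespace:  aws.String("ecs"),
		ResourceId:        aws.String("service/my-cluster/my-service"), // placeholder
		ScalableDimension: aws.String("ecs:service:DesiredCount"),
		SuspendedState: &applicationautoscaling.SuspendedState{
			ScheduledScalingSuspended: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Iterate over all scheduled actions with the newly generated paginator;
	// returning true from the callback continues to the next page.
	err = svc.DescribeScheduledActionsPages(
		&applicationautoscaling.DescribeScheduledActionsInput{
			ServiceNamespace: aws.String("ecs"),
		},
		func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool {
			for _, a := range page.ScheduledActions {
				fmt.Println(aws.StringValue(a.ScheduledActionName))
			}
			return !lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}
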
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 0385cb15b4f1d..4b4802db0ce96 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -12685,6 +12685,80 @@ func (c *EC2) DescribeElasticGpusWithContext(ctx aws.Context, input *DescribeEla return out, req.Send() } +const opDescribeExportImageTasks = "DescribeExportImageTasks" + +// DescribeExportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportImageTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeExportImageTasks for more information on using the DescribeExportImageTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeExportImageTasksRequest method. +// req, resp := client.DescribeExportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInput) (req *request.Request, output *DescribeExportImageTasksOutput) { + op := &request.Operation{ + Name: opDescribeExportImageTasks, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeExportImageTasksInput{} + } + + output = &DescribeExportImageTasksOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. +// +// Describes the specified export image tasks or all your export image tasks. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeExportImageTasks for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeExportImageTasks +func (c *EC2) DescribeExportImageTasks(input *DescribeExportImageTasksInput) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + return out, req.Send() +} + +// DescribeExportImageTasksWithContext is the same as DescribeExportImageTasks with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeExportImageTasks for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) DescribeExportImageTasksWithContext(ctx aws.Context, input *DescribeExportImageTasksInput, opts ...request.Option) (*DescribeExportImageTasksOutput, error) { + req, out := c.DescribeExportImageTasksRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeExportTasks = "DescribeExportTasks" // DescribeExportTasksRequest generates a "aws/request.Request" representing the @@ -12729,7 +12803,8 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export tasks or all your export tasks. +// Describes the specified export instance tasks or all your export instance +// tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -24001,6 +24076,82 @@ func (c *EC2) ExportClientVpnClientConfigurationWithContext(ctx aws.Context, inp return out, req.Send() } +const opExportImage = "ExportImage" + +// ExportImageRequest generates a "aws/request.Request" representing the +// client's request for the ExportImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportImage for more information on using the ExportImage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportImageRequest method. +// req, resp := client.ExportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImageRequest(input *ExportImageInput) (req *request.Request, output *ExportImageOutput) { + op := &request.Operation{ + Name: opExportImage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExportImageInput{} + } + + output = &ExportImageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportImage API operation for Amazon Elastic Compute Cloud. +// +// Exports an Amazon Machine Image (AMI) to a VM file. For more information, +// see Exporting a VM Directory from an Amazon Machine Image (AMI) (https://docs.aws.amazon.com/vm-import/latest/userguide/vmexport_image.html) +// in the VM Import/Export User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ExportImage for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ExportImage +func (c *EC2) ExportImage(input *ExportImageInput) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + return out, req.Send() +} + +// ExportImageWithContext is the same as ExportImage with the addition of +// the ability to pass a context and additional request options. 
+// +// See ExportImage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ExportImageWithContext(ctx aws.Context, input *ExportImageInput, opts ...request.Option) (*ExportImageOutput, error) { + req, out := c.ExportImageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opExportTransitGatewayRoutes = "ExportTransitGatewayRoutes" // ExportTransitGatewayRoutesRequest generates a "aws/request.Request" representing the @@ -24121,6 +24272,12 @@ func (c *EC2) GetCapacityReservationUsageRequest(input *GetCapacityReservationUs // GetCapacityReservationUsage API operation for Amazon Elastic Compute Cloud. // +// Gets usage information about a Capacity Reservation. If the Capacity Reservation +// is shared, it shows usage information for the Capacity Reservation owner +// and each AWS account that is currently using the shared capacity. If the +// Capacity Reservation is not shared, it shows only the Capacity Reservation +// owner's usage. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -25943,8 +26100,35 @@ func (c *EC2) ModifyFleetRequest(input *ModifyFleetInput) (req *request.Request, // // Modifies the specified EC2 Fleet. // +// You can only modify an EC2 Fleet request of type maintain. +// // While the EC2 Fleet is being modified, it is in the modifying state. // +// To scale up your EC2 Fleet, increase its target capacity. The EC2 Fleet launches +// the additional Spot Instances according to the allocation strategy for the +// EC2 Fleet request. If the allocation strategy is lowestPrice, the EC2 Fleet +// launches instances using the Spot Instance pool with the lowest price. If +// the allocation strategy is diversified, the EC2 Fleet distributes the instances +// across the Spot Instance pools. If the allocation strategy is capacityOptimized, +// EC2 Fleet launches instances from Spot Instance pools with optimal capacity +// for the number of instances that are launching. +// +// To scale down your EC2 Fleet, decrease its target capacity. First, the EC2 +// Fleet cancels any open requests that exceed the new target capacity. You +// can request that the EC2 Fleet terminate Spot Instances until the size of +// the fleet no longer exceeds the new target capacity. If the allocation strategy +// is lowestPrice, the EC2 Fleet terminates the instances with the highest price +// per unit. If the allocation strategy is capacityOptimized, the EC2 Fleet +// terminates the instances in the Spot Instance pools that have the least available +// Spot Instance capacity. If the allocation strategy is diversified, the EC2 +// Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the EC2 Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. +// +// If you are finished with your EC2 Fleet for now, but will use it again later, +// you can set the target capacity to 0. +// // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -27097,6 +27281,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // or remove specified AWS account IDs from a snapshot's list of create volume // permissions, but you cannot do both in a single operation. If you need to // both add and remove account IDs for a snapshot, you must use multiple operations. +// You can make up to 500 modifications to a snapshot in a single operation. // // Encrypted snapshots and snapshots with AWS Marketplace product codes cannot // be made public. Snapshots encrypted with your default CMK cannot be shared @@ -27187,19 +27372,24 @@ func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) // To scale up your Spot Fleet, increase its target capacity. The Spot Fleet // launches the additional Spot Instances according to the allocation strategy // for the Spot Fleet request. If the allocation strategy is lowestPrice, the -// Spot Fleet launches instances using the Spot pool with the lowest price. -// If the allocation strategy is diversified, the Spot Fleet distributes the -// instances across the Spot pools. +// Spot Fleet launches instances using the Spot Instance pool with the lowest +// price. If the allocation strategy is diversified, the Spot Fleet distributes +// the instances across the Spot Instance pools. If the allocation strategy +// is capacityOptimized, Spot Fleet launches instances from Spot Instance pools +// with optimal capacity for the number of instances that are launching. // // To scale down your Spot Fleet, decrease its target capacity. First, the Spot // Fleet cancels any open requests that exceed the new target capacity. You // can request that the Spot Fleet terminate Spot Instances until the size of // the fleet no longer exceeds the new target capacity. If the allocation strategy // is lowestPrice, the Spot Fleet terminates the instances with the highest -// price per unit. If the allocation strategy is diversified, the Spot Fleet -// terminates instances across the Spot pools. Alternatively, you can request -// that the Spot Fleet keep the fleet at its current size, but not replace any -// Spot Instances that are interrupted or that you terminate manually. +// price per unit. If the allocation strategy is capacityOptimized, the Spot +// Fleet terminates the instances in the Spot Instance pools that have the least +// available Spot Instance capacity. If the allocation strategy is diversified, +// the Spot Fleet terminates instances across the Spot Instance pools. Alternatively, +// you can request that the Spot Fleet keep the fleet at its current size, but +// not replace any Spot Instances that are interrupted or that you terminate +// manually. // // If you are finished with your Spot Fleet for now, but will use it again later, // you can set the target capacity to 0. @@ -28484,6 +28674,80 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" + +// ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnTunnelCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnTunnelCertificate for more information on using the ModifyVpnTunnelCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnTunnelCertificateRequest method. +// req, resp := client.ModifyVpnTunnelCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificateRequest(input *ModifyVpnTunnelCertificateInput) (req *request.Request, output *ModifyVpnTunnelCertificateOutput) { + op := &request.Operation{ + Name: opModifyVpnTunnelCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnTunnelCertificateInput{} + } + + output = &ModifyVpnTunnelCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnTunnelCertificate API operation for Amazon Elastic Compute Cloud. +// +// Modifies the VPN tunnel endpoint certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnTunnelCertificate for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnTunnelCertificate +func (c *EC2) ModifyVpnTunnelCertificate(input *ModifyVpnTunnelCertificateInput) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + return out, req.Send() +} + +// ModifyVpnTunnelCertificateWithContext is the same as ModifyVpnTunnelCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnTunnelCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnTunnelCertificateWithContext(ctx aws.Context, input *ModifyVpnTunnelCertificateInput, opts ...request.Option) (*ModifyVpnTunnelCertificateOutput, error) { + req, out := c.ModifyVpnTunnelCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opMonitorInstances = "MonitorInstances" // MonitorInstancesRequest generates a "aws/request.Request" representing the @@ -30179,10 +30443,10 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // You can submit a single request that includes multiple launch specifications // that vary by instance type, AMI, Availability Zone, or subnet. // -// By default, the Spot Fleet requests Spot Instances in the Spot pool where -// the price per unit is the lowest. Each launch specification can include its -// own instance weighting that reflects the value of the instance type to your -// application workload. 
+// By default, the Spot Fleet requests Spot Instances in the Spot Instance pool +// where the price per unit is the lowest. Each launch specification can include +// its own instance weighting that reflects the value of the instance type to +// your application workload. // // Alternatively, you can specify that the Spot Fleet distribute the target // capacity across the Spot pools included in its launch specifications. By @@ -31384,6 +31648,98 @@ func (c *EC2) SearchTransitGatewayRoutesWithContext(ctx aws.Context, input *Sear return out, req.Send() } +const opSendDiagnosticInterrupt = "SendDiagnosticInterrupt" + +// SendDiagnosticInterruptRequest generates a "aws/request.Request" representing the +// client's request for the SendDiagnosticInterrupt operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SendDiagnosticInterrupt for more information on using the SendDiagnosticInterrupt +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SendDiagnosticInterruptRequest method. +// req, resp := client.SendDiagnosticInterruptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput) (req *request.Request, output *SendDiagnosticInterruptOutput) { + op := &request.Operation{ + Name: opSendDiagnosticInterrupt, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendDiagnosticInterruptInput{} + } + + output = &SendDiagnosticInterruptOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SendDiagnosticInterrupt API operation for Amazon Elastic Compute Cloud. +// +// Sends a diagnostic interrupt to the specified Amazon EC2 instance to trigger +// a kernel panic (on Linux instances), or a blue screen/stop error (on Windows +// instances). For instances based on Intel and AMD processors, the interrupt +// is received as a non-maskable interrupt (NMI). +// +// In general, the operating system crashes and reboots when a kernel panic +// or stop error is triggered. The operating system can also be configured to +// perform diagnostic tasks, such as generating a memory dump file, loading +// a secondary kernel, or obtaining a call trace. +// +// Before sending a diagnostic interrupt to your instance, ensure that its operating +// system is configured to perform the required diagnostic tasks. +// +// For more information about configuring your operating system to generate +// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic +// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) +// (Windows instances). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation SendDiagnosticInterrupt for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/SendDiagnosticInterrupt +func (c *EC2) SendDiagnosticInterrupt(input *SendDiagnosticInterruptInput) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + return out, req.Send() +} + +// SendDiagnosticInterruptWithContext is the same as SendDiagnosticInterrupt with the addition of +// the ability to pass a context and additional request options. +// +// See SendDiagnosticInterrupt for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) SendDiagnosticInterruptWithContext(ctx aws.Context, input *SendDiagnosticInterruptInput, opts ...request.Option) (*SendDiagnosticInterruptOutput, error) { + req, out := c.SendDiagnosticInterruptRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartInstances = "StartInstances" // StartInstancesRequest generates a "aws/request.Request" representing the @@ -36457,12 +36813,14 @@ type CapacityReservation struct { // The Availability Zone in which the capacity is reserved. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // The Availability Zone ID of the Capacity Reservation. AvailabilityZoneId *string `locationName:"availabilityZoneId" type:"string"` // The remaining capacity. Indicates the number of instances that can be launched // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The Amazon Resource Name (ARN) of the Capacity Reservation. CapacityReservationArn *string `locationName:"capacityReservationArn" type:"string"` // The ID of the Capacity Reservation. @@ -36519,6 +36877,7 @@ type CapacityReservation struct { // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // The ID of the AWS account that owns the Capacity Reservation. OwnerId *string `locationName:"ownerId" type:"string"` // The current state of the Capacity Reservation. A Capacity Reservation can @@ -36527,11 +36886,11 @@ type CapacityReservation struct { // * active - The Capacity Reservation is active and the capacity is available // for your use. // - // * cancelled - The Capacity Reservation expired automatically at the date + // * expired - The Capacity Reservation expired automatically at the date // and time specified in your request. The reserved capacity is no longer // available for your use. // - // * expired - The Capacity Reservation was manually cancelled. The reserved + // * cancelled - The Capacity Reservation was manually cancelled. The reserved // capacity is no longer available for your use. // // * pending - The Capacity Reservation request was successful but the capacity @@ -36555,7 +36914,8 @@ type CapacityReservation struct { // that is dedicated to a single AWS account. 
Tenancy *string `locationName:"tenancy" type:"string" enum:"CapacityReservationTenancy"` - // The number of instances for which the Capacity Reservation reserves capacity. + // The total number of instances for which the Capacity Reservation reserves + // capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -37515,8 +37875,7 @@ type ClientVpnEndpoint struct { // The ARN of the server certificate. ServerCertificateArn *string `locationName:"serverCertificateArn" type:"string"` - // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled in the AWS Client VPN endpoint. // // For information about split-tunnel VPN endpoints, see Split-Tunnel AWS Client // VPN Endpoint (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) @@ -38649,9 +39008,8 @@ type CpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -38683,6 +39041,7 @@ type CreateCapacityReservationInput struct { // The Availability Zone in which to create the Capacity Reservation. AvailabilityZone *string `type:"string"` + // The ID of the Availability Zone in which to create the Capacity Reservation. AvailabilityZoneId *string `type:"string"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -38958,8 +39317,7 @@ type CreateClientVpnEndpointInput struct { // Information about the DNS servers to be used for DNS resolution. A Client // VPN endpoint can have up to two DNS servers. If no DNS server is specified, - // the DNS address of the VPC that is to be associated with Client VPN endpoint - // is used as the DNS server. + // the DNS address configured on the device is used for the DNS server. DnsServers []*string `locationNameList:"item" type:"list"` // Checks whether you have the required permissions for the action, without @@ -38974,8 +39332,7 @@ type CreateClientVpnEndpointInput struct { // ServerCertificateArn is a required field ServerCertificateArn *string `type:"string" required:"true"` - // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint - // endpoint. + // Indicates whether split-tunnel is enabled on the AWS Client VPN endpoint. // // By default, split-tunnel on a VPN endpoint is disabled. // @@ -39274,6 +39631,9 @@ type CreateCustomerGatewayInput struct { // BgpAsn is a required field BgpAsn *int64 `type:"integer" required:"true"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -39282,9 +39642,7 @@ type CreateCustomerGatewayInput struct { // The Internet-routable IP address for the customer gateway's outside interface. // The address must be static. 
- // - // PublicIp is a required field - PublicIp *string `locationName:"IpAddress" type:"string" required:"true"` + PublicIp *string `locationName:"IpAddress" type:"string"` // The type of VPN connection that this customer gateway supports (ipsec.1). // @@ -39308,9 +39666,6 @@ func (s *CreateCustomerGatewayInput) Validate() error { if s.BgpAsn == nil { invalidParams.Add(request.NewErrParamRequired("BgpAsn")) } - if s.PublicIp == nil { - invalidParams.Add(request.NewErrParamRequired("PublicIp")) - } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -39327,6 +39682,12 @@ func (s *CreateCustomerGatewayInput) SetBgpAsn(v int64) *CreateCustomerGatewayIn return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CreateCustomerGatewayInput) SetCertificateArn(v string) *CreateCustomerGatewayInput { + s.CertificateArn = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateCustomerGatewayInput) SetDryRun(v bool) *CreateCustomerGatewayInput { s.DryRun = &v @@ -40835,7 +41196,9 @@ type CreateLaunchTemplateVersionInput struct { // The version number of the launch template version on which to base the new // version. The new version inherits the same launch parameters as the source - // version, except for parameters that you specify in LaunchTemplateData. + // version, except for parameters that you specify in LaunchTemplateData. Snapshots + // applied to the block device mapping are ignored when creating a new version + // unless they are explicitly included. SourceVersion *string `type:"string"` // A description for the version of the launch template. @@ -43607,10 +43970,7 @@ type CreateVolumeInput struct { // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard // for Magnetic volumes. // - // Defaults: If no volume type is specified, the default is standard in us-east-1, - // eu-west-1, eu-central-1, us-west-2, us-west-1, sa-east-1, ap-northeast-1, - // ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-south-1, us-gov-west-1, - // and cn-north-1. In all other Regions, EBS defaults to gp2. + // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` } @@ -44718,6 +45078,9 @@ type CustomerGateway struct { // (ASN). BgpAsn *string `locationName:"bgpAsn" type:"string"` + // The Amazon Resource Name (ARN) for the customer gateway certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The ID of the customer gateway. CustomerGatewayId *string `locationName:"customerGatewayId" type:"string"` @@ -44751,6 +45114,12 @@ func (s *CustomerGateway) SetBgpAsn(v string) *CustomerGateway { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *CustomerGateway) SetCertificateArn(v string) *CustomerGateway { + s.CertificateArn = &v + return s +} + // SetCustomerGatewayId sets the CustomerGatewayId field's value. func (s *CustomerGateway) SetCustomerGatewayId(v string) *CustomerGateway { s.CustomerGatewayId = &v @@ -49997,6 +50366,115 @@ func (s *DescribeElasticGpusOutput) SetNextToken(v string) *DescribeElasticGpusO return s } +type DescribeExportImageTasksInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IDs of the export image tasks. 
+ ExportImageTaskIds []*string `locationName:"ExportImageTaskId" locationNameList:"ExportImageTaskId" type:"list"` + + // Filter tasks using the task-state filter and one of the following values: + // active, completed, deleting, or deleted. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // A token that indicates the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeExportImageTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportImageTasksInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeExportImageTasksInput) SetDryRun(v bool) *DescribeExportImageTasksInput { + s.DryRun = &v + return s +} + +// SetExportImageTaskIds sets the ExportImageTaskIds field's value. +func (s *DescribeExportImageTasksInput) SetExportImageTaskIds(v []*string) *DescribeExportImageTasksInput { + s.ExportImageTaskIds = v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeExportImageTasksInput) SetFilters(v []*Filter) *DescribeExportImageTasksInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeExportImageTasksInput) SetMaxResults(v int64) *DescribeExportImageTasksInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksInput) SetNextToken(v string) *DescribeExportImageTasksInput { + s.NextToken = &v + return s +} + +type DescribeExportImageTasksOutput struct { + _ struct{} `type:"structure"` + + // Information about the export image tasks. + ExportImageTasks []*ExportImageTask `locationName:"exportImageTaskSet" locationNameList:"item" type:"list"` + + // The token to use to get the next page of results. This value is null when + // there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeExportImageTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeExportImageTasksOutput) GoString() string { + return s.String() +} + +// SetExportImageTasks sets the ExportImageTasks field's value. +func (s *DescribeExportImageTasksOutput) SetExportImageTasks(v []*ExportImageTask) *DescribeExportImageTasksOutput { + s.ExportImageTasks = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeExportImageTasksOutput) SetNextToken(v string) *DescribeExportImageTasksOutput { + s.NextToken = &v + return s +} + type DescribeExportTasksInput struct { _ struct{} `type:"structure"` @@ -51807,14 +52285,13 @@ type DescribeImportImageTasksInput struct { DryRun *bool `type:"boolean"` // Filter tasks using the task-state filter and one of the following values: - // active, completed, deleting, deleted. 
+ // active, completed, deleting, or deleted. Filters []*Filter `locationNameList:"Filter" type:"list"` - // A list of import image task IDs. + // The IDs of the import image tasks. ImportTaskIds []*string `locationName:"ImportTaskId" locationNameList:"ImportTaskId" type:"list"` - // The maximum number of results to return in a single call. To retrieve the - // remaining results, make another call with the returned NextToken value. + // The maximum number of results to return in a single call. MaxResults *int64 `type:"integer"` // A token that indicates the next page of results. @@ -54549,6 +55026,9 @@ type DescribeRegionsInput struct { // // * endpoint - The endpoint of the Region (for example, ec2.us-east-1.amazonaws.com). // + // * opt-in-status - The opt-in status of the Region (opt-in-not-required + // | opted-in | not-opted-in). + // // * region-name - The name of the Region (for example, us-east-1). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -60360,6 +60840,19 @@ type DetachNetworkInterfaceInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // Specifies whether to force a detachment. + // + // * Use the Force parameter only as a last resort to detach a network interface + // from a failed instance. + // + // * If you use the Force parameter to detach a network interface, you might + // not be able to attach a different network interface to the same index + // on the instance without first stopping and starting the instance. + // + // * If you force the detachment of a network interface, the instance metadata + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // might not get updated. This means that the attributes associated with + // the detached network interface might still be visible. The instance metadata + // will get updated when you stop and start the instance. Force *bool `locationName:"force" type:"boolean"` } @@ -61909,9 +62402,10 @@ type EbsBlockDevice struct { // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also set the Iops property. + // The volume type. If you set the type to io1, you must also specify the IOPS + // that the volume supports. // - // Default: standard + // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -63035,6 +63529,295 @@ func (s *ExportClientVpnClientConfigurationOutput) SetClientConfiguration(v stri return s } +type ExportImageInput struct { + _ struct{} `type:"structure"` + + // Token to enable idempotency for export image requests. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // A description of the image being exported. The maximum length is 255 bytes. + Description *string `type:"string"` + + // The disk image format. + // + // DiskImageFormat is a required field + DiskImageFormat *string `type:"string" required:"true" enum:"DiskImageFormat"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. 
If this parameter is not specified, the default role is + // named 'vmimport'. + RoleName *string `type:"string"` + + // Information about the destination S3 bucket. The bucket must exist and grant + // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // + // S3ExportLocation is a required field + S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ExportImageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportImageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportImageInput"} + if s.DiskImageFormat == nil { + invalidParams.Add(request.NewErrParamRequired("DiskImageFormat")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.S3ExportLocation == nil { + invalidParams.Add(request.NewErrParamRequired("S3ExportLocation")) + } + if s.S3ExportLocation != nil { + if err := s.S3ExportLocation.Validate(); err != nil { + invalidParams.AddNested("S3ExportLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *ExportImageInput) SetClientToken(v string) *ExportImageInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ExportImageInput) SetDescription(v string) *ExportImageInput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageInput) SetDiskImageFormat(v string) *ExportImageInput { + s.DiskImageFormat = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ExportImageInput) SetDryRun(v bool) *ExportImageInput { + s.DryRun = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageInput) SetImageId(v string) *ExportImageInput { + s.ImageId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageInput) SetRoleName(v string) *ExportImageInput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) *ExportImageInput { + s.S3ExportLocation = v + return s +} + +type ExportImageOutput struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The disk image format for the exported image. + DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // The name of the role that grants VM Import/Export permission to export images + // to your S3 bucket. + RoleName *string `locationName:"roleName" type:"string"` + + // Information about the destination S3 bucket. 
+ S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ExportImageOutput) SetDescription(v string) *ExportImageOutput { + s.Description = &v + return s +} + +// SetDiskImageFormat sets the DiskImageFormat field's value. +func (s *ExportImageOutput) SetDiskImageFormat(v string) *ExportImageOutput { + s.DiskImageFormat = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageOutput) SetExportImageTaskId(v string) *ExportImageOutput { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageOutput) SetImageId(v string) *ExportImageOutput { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageOutput) SetProgress(v string) *ExportImageOutput { + s.Progress = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ExportImageOutput) SetRoleName(v string) *ExportImageOutput { + s.RoleName = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageOutput) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageOutput { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageOutput) SetStatus(v string) *ExportImageOutput { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { + s.StatusMessage = &v + return s +} + +// Describes an export image task. +type ExportImageTask struct { + _ struct{} `type:"structure"` + + // A description of the image being exported. + Description *string `locationName:"description" type:"string"` + + // The ID of the export image task. + ExportImageTaskId *string `locationName:"exportImageTaskId" type:"string"` + + // The ID of the image. + ImageId *string `locationName:"imageId" type:"string"` + + // The percent complete of the export image task. + Progress *string `locationName:"progress" type:"string"` + + // Information about the destination S3 bucket. + S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` + + // The status of the export image task. The possible values are active, completed, + // deleting, and deleted. + Status *string `locationName:"status" type:"string"` + + // The status message for the export image task. + StatusMessage *string `locationName:"statusMessage" type:"string"` +} + +// String returns the string representation +func (s ExportImageTask) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportImageTask) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. 
+func (s *ExportImageTask) SetDescription(v string) *ExportImageTask { + s.Description = &v + return s +} + +// SetExportImageTaskId sets the ExportImageTaskId field's value. +func (s *ExportImageTask) SetExportImageTaskId(v string) *ExportImageTask { + s.ExportImageTaskId = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *ExportImageTask) SetImageId(v string) *ExportImageTask { + s.ImageId = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *ExportImageTask) SetProgress(v string) *ExportImageTask { + s.Progress = &v + return s +} + +// SetS3ExportLocation sets the S3ExportLocation field's value. +func (s *ExportImageTask) SetS3ExportLocation(v *ExportTaskS3Location) *ExportImageTask { + s.S3ExportLocation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ExportImageTask) SetStatus(v string) *ExportImageTask { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. +func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { + s.StatusMessage = &v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -63104,6 +63887,87 @@ func (s *ExportTask) SetStatusMessage(v string) *ExportTask { return s } +// Describes the destination for an export image task. +type ExportTaskS3Location struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + S3Bucket *string `locationName:"s3Bucket" type:"string"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `locationName:"s3Prefix" type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3Location) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3Location) SetS3Bucket(v string) *ExportTaskS3Location { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { + s.S3Prefix = &v + return s +} + +// Describes the destination for an export image task. +type ExportTaskS3LocationRequest struct { + _ struct{} `type:"structure"` + + // The destination S3 bucket. + // + // S3Bucket is a required field + S3Bucket *string `type:"string" required:"true"` + + // The prefix (logical hierarchy) in the bucket. + S3Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ExportTaskS3LocationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportTaskS3LocationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportTaskS3LocationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportTaskS3LocationRequest"} + if s.S3Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("S3Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *ExportTaskS3LocationRequest) SetS3Bucket(v string) *ExportTaskS3LocationRequest { + s.S3Bucket = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. 
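As an aside on the new ExportImage types added above: a minimal sketch of building and validating such a request is shown below. The AMI ID, bucket name, and prefix are hypothetical, and the ExportImage operation method on the client is assumed to be generated alongside these input/output types; this is illustration only, not part of the vendored SDK.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))
	input := &ec2.ExportImageInput{
		DiskImageFormat: aws.String("VMDK"),
		ImageId:         aws.String("ami-1234567890abcdef0"), // hypothetical AMI ID
		S3ExportLocation: &ec2.ExportTaskS3LocationRequest{
			S3Bucket: aws.String("my-export-bucket"), // hypothetical bucket
			S3Prefix: aws.String("exports/"),
		},
	}
	// DiskImageFormat, ImageId, and S3ExportLocation are required, so a missing
	// field surfaces here via Validate before any request is made.
	if err := input.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	out, err := svc.ExportImage(input)
	fmt.Println(out, err)
}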
+func (s *ExportTaskS3LocationRequest) SetS3Prefix(v string) *ExportTaskS3LocationRequest { + s.S3Prefix = &v + return s +} + // Describes the format and location for an instance export task. type ExportToS3Task struct { _ struct{} `type:"structure"` @@ -64356,13 +65220,25 @@ func (s *FpgaImageState) SetMessage(v string) *FpgaImageState { type GetCapacityReservationUsageInput struct { _ struct{} `type:"structure"` + // The ID of the Capacity Reservation. + // // CapacityReservationId is a required field CapacityReservationId *string `type:"string" required:"true"` + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. + // + // Valid range: Minimum value of 1. Maximum value of 1000. MaxResults *int64 `min:"1" type:"integer"` + // The token to retrieve the next page of results. NextToken *string `type:"string"` } @@ -64419,18 +65295,45 @@ func (s *GetCapacityReservationUsageInput) SetNextToken(v string) *GetCapacityRe type GetCapacityReservationUsageOutput struct { _ struct{} `type:"structure"` + // The remaining capacity. Indicates the number of instances that can be launched + // in the Capacity Reservation. AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + // The ID of the Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + // The type of instance for which the Capacity Reservation reserves capacity. InstanceType *string `locationName:"instanceType" type:"string"` + // Information about the Capacity Reservation usage. InstanceUsages []*InstanceUsage `locationName:"instanceUsageSet" locationNameList:"item" type:"list"` + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` + // The current state of the Capacity Reservation. A Capacity Reservation can + // be in one of the following states: + // + // * active - The Capacity Reservation is active and the capacity is available + // for your use. + // + // * expired - The Capacity Reservation expired automatically at the date + // and time specified in your request. The reserved capacity is no longer + // available for your use. + // + // * cancelled - The Capacity Reservation was manually cancelled. The reserved + // capacity is no longer available for your use. + // + // * pending - The Capacity Reservation request was successful but the capacity + // provisioning is still pending. + // + // * failed - The Capacity Reservation request has failed. A request might + // fail due to invalid request parameters, capacity constraints, or instance + // limit constraints. Failed requests are retained for 60 minutes. State *string `locationName:"state" type:"string" enum:"CapacityReservationState"` + // The number of instances for which the Capacity Reservation reserves capacity. TotalInstanceCount *int64 `locationName:"totalInstanceCount" type:"integer"` } @@ -69240,8 +70143,8 @@ type InstanceNetworkInterfaceSpecification struct { // request. 
SecondaryPrivateIpAddressCount *int64 `locationName:"secondaryPrivateIpAddressCount" type:"integer"` - // The ID of the subnet associated with the network string. Applies only if - // creating a network interface when launching an instance. + // The ID of the subnet associated with the network interface. Applies only + // if creating a network interface when launching an instance. SubnetId *string `locationName:"subnetId" type:"string"` } @@ -69740,11 +70643,14 @@ func (s *InstanceStatusSummary) SetStatus(v string) *InstanceStatusSummary { return s } +// Information about the Capacity Reservation usage. type InstanceUsage struct { _ struct{} `type:"structure"` + // The ID of the AWS account that is making use of the Capacity Reservation. AccountId *string `locationName:"accountId" type:"string"` + // The number of instances the AWS account currently has in the Capacity Reservation. UsedInstanceCount *int64 `locationName:"usedInstanceCount" type:"integer"` } @@ -70706,9 +71612,8 @@ type LaunchTemplateCpuOptionsRequest struct { // The number of CPU cores for the instance. CoreCount *int64 `type:"integer"` - // The number of threads per CPU core. To disable Intel Hyper-Threading Technology - // for the instance, specify a value of 1. Otherwise, specify the default value - // of 2. + // The number of threads per CPU core. To disable multithreading for the instance, + // specify a value of 1. Otherwise, specify the default value of 2. ThreadsPerCore *int64 `type:"integer"` } @@ -72369,7 +73274,7 @@ func (s *ModifyCapacityReservationInput) SetInstanceCount(v int64) *ModifyCapaci type ModifyCapacityReservationOutput struct { _ struct{} `type:"structure"` - // Information about the Capacity Reservation. + // Returns true if the request succeeds; otherwise, it returns an error. Return *bool `locationName:"return" type:"boolean"` } @@ -75389,8 +76294,7 @@ type ModifyVpcEndpointInput struct { DryRun *bool `type:"boolean"` // A policy to attach to the endpoint that controls access to the service. The - // policy must be in valid JSON format. If this parameter is not specified, - // we attach a default policy that allows full access to the service. + // policy must be in valid JSON format. PolicyDocument *string `type:"string"` // (Interface endpoint) Indicate whether a private hosted zone is associated @@ -75915,6 +76819,9 @@ func (s *ModifyVpcTenancyOutput) SetReturnValue(v bool) *ModifyVpcTenancyOutput type ModifyVpnConnectionInput struct { _ struct{} `type:"structure"` + // The ID of the customer gateway at your end of the VPN connection. + CustomerGatewayId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -75956,6 +76863,12 @@ func (s *ModifyVpnConnectionInput) Validate() error { return nil } +// SetCustomerGatewayId sets the CustomerGatewayId field's value. +func (s *ModifyVpnConnectionInput) SetCustomerGatewayId(v string) *ModifyVpnConnectionInput { + s.CustomerGatewayId = &v + return s +} + // SetDryRun sets the DryRun field's value. 
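For orientation on the CustomerGatewayId field just added to ModifyVpnConnectionInput: a hedged caller-side fragment is sketched below. The IDs are hypothetical, svc is assumed to be an *ec2.EC2 client created elsewhere, and the ModifyVpnConnection operation method itself is outside this hunk.

	// Fragment only; error handling context and svc are assumed.
	input := &ec2.ModifyVpnConnectionInput{
		VpnConnectionId:   aws.String("vpn-1234567890abcdef0"), // hypothetical
		CustomerGatewayId: aws.String("cgw-1234567890abcdef0"), // hypothetical
	}
	if err := input.Validate(); err != nil {
		return err
	}
	if _, err := svc.ModifyVpnConnection(input); err != nil {
		return err
	}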
func (s *ModifyVpnConnectionInput) SetDryRun(v bool) *ModifyVpnConnectionInput { s.DryRun = &v @@ -76003,6 +76916,93 @@ func (s *ModifyVpnConnectionOutput) SetVpnConnection(v *VpnConnection) *ModifyVp return s } +type ModifyVpnTunnelCertificateInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the AWS Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` + + // The external IP address of the VPN tunnel. + // + // VpnTunnelOutsideIpAddress is a required field + VpnTunnelOutsideIpAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnTunnelCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnTunnelCertificateInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + if s.VpnTunnelOutsideIpAddress == nil { + invalidParams.Add(request.NewErrParamRequired("VpnTunnelOutsideIpAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyVpnTunnelCertificateInput) SetDryRun(v bool) *ModifyVpnTunnelCertificateInput { + s.DryRun = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnConnectionId(v string) *ModifyVpnTunnelCertificateInput { + s.VpnConnectionId = &v + return s +} + +// SetVpnTunnelOutsideIpAddress sets the VpnTunnelOutsideIpAddress field's value. +func (s *ModifyVpnTunnelCertificateInput) SetVpnTunnelOutsideIpAddress(v string) *ModifyVpnTunnelCertificateInput { + s.VpnTunnelOutsideIpAddress = &v + return s +} + +type ModifyVpnTunnelCertificateOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnTunnelCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnTunnelCertificateOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnTunnelCertificateOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnTunnelCertificateOutput { + s.VpnConnection = v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -80982,7 +81982,9 @@ type RequestSpotLaunchSpecification struct { // you can specify the names or the IDs of the security groups. SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"` - // The ID of the subnet in which to launch the instance. + // The IDs of the subnets in which to launch the instance. 
To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The Base64-encoded user data for the instance. User data is limited to 16 @@ -84640,7 +85642,7 @@ type ScheduledInstancesEbs struct { // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. // - // Default: standard + // Default: gp2 VolumeType *string `type:"string"` } @@ -85165,7 +86167,7 @@ type SearchTransitGatewayRoutesInput struct { // // * state - The state of the route (active | blackhole). // - // * type - The type of roue (propagated | static). + // * type - The type of route (propagated | static). // // Filters is a required field Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list" required:"true"` @@ -85426,6 +86428,70 @@ func (s *SecurityGroupReference) SetVpcPeeringConnectionId(v string) *SecurityGr return s } +type SendDiagnosticInterruptInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + // + // InstanceId is a required field + InstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendDiagnosticInterruptInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendDiagnosticInterruptInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *SendDiagnosticInterruptInput) SetDryRun(v bool) *SendDiagnosticInterruptInput { + s.DryRun = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *SendDiagnosticInterruptInput) SetInstanceId(v string) *SendDiagnosticInterruptInput { + s.InstanceId = &v + return s +} + +type SendDiagnosticInterruptOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SendDiagnosticInterruptOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDiagnosticInterruptOutput) GoString() string { + return s.String() +} + // Describes a service configuration for a VPC endpoint service. type ServiceConfiguration struct { _ struct{} `type:"structure"` @@ -86378,7 +87444,7 @@ type SpotFleetLaunchSpecification struct { // Deprecated. AddressingType *string `locationName:"addressingType" type:"string"` - // One or more block devices that are mapped to the Spot instances. You can't + // One or more block devices that are mapped to the Spot Instances. You can't // specify both a snapshot ID and an encryption value. This is because only // blank volumes can be encrypted on creation. 
If a snapshot is the basis for // a volume, it is not blank and its encryption status is used for the volume @@ -86436,8 +87502,9 @@ type SpotFleetLaunchSpecification struct { // by the value of WeightedCapacity. SpotPrice *string `locationName:"spotPrice" type:"string"` - // The ID of the subnet in which to launch the instances. To specify multiple - // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". + // The IDs of the subnets in which to launch the instances. To specify multiple + // subnets, separate them using commas; for example, "subnet-1234abcdeexample1, + // subnet-0987cdef6example2". SubnetId *string `locationName:"subnetId" type:"string"` // The tags to apply during creation. @@ -86668,8 +87735,19 @@ func (s *SpotFleetRequestConfig) SetSpotFleetRequestState(v string) *SpotFleetRe type SpotFleetRequestConfigData struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the Spot Fleet request. + // + // If the allocation strategy is lowestPrice, Spot Fleet launches instances + // from the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, Spot Fleet launches instances + // from all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, Spot Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"AllocationStrategy"` // A unique, case-sensitive identifier that you provide to ensure the idempotency @@ -87362,8 +88440,19 @@ func (s *SpotMarketOptions) SetValidUntil(v time.Time) *SpotMarketOptions { type SpotOptions struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowest-price. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. + // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -87447,8 +88536,19 @@ func (s *SpotOptions) SetSingleInstanceType(v bool) *SpotOptions { type SpotOptionsRequest struct { _ struct{} `type:"structure"` - // Indicates how to allocate the target capacity across the Spot pools specified - // by the Spot Fleet request. The default is lowestPrice. + // Indicates how to allocate the target Spot Instance capacity across the Spot + // Instance pools specified by the EC2 Fleet. 
+ // + // If the allocation strategy is lowestPrice, EC2 Fleet launches instances from + // the Spot Instance pools with the lowest price. This is the default allocation + // strategy. + // + // If the allocation strategy is diversified, EC2 Fleet launches instances from + // all the Spot Instance pools that you specify. + // + // If the allocation strategy is capacityOptimized, EC2 Fleet launches instances + // from Spot Instance pools with optimal capacity for the number of instances + // that are launching. AllocationStrategy *string `type:"string" enum:"SpotAllocationStrategy"` // The behavior when a Spot Instance is interrupted. The default is terminate. @@ -91222,6 +92322,9 @@ type VgwTelemetry struct { // The number of accepted routes. AcceptedRouteCount *int64 `locationName:"acceptedRouteCount" type:"integer"` + // The Amazon Resource Name (ARN) of the VPN tunnel endpoint certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + // The date and time of the last change in status. LastStatusChange *time.Time `locationName:"lastStatusChange" type:"timestamp"` @@ -91252,6 +92355,12 @@ func (s *VgwTelemetry) SetAcceptedRouteCount(v int64) *VgwTelemetry { return s } +// SetCertificateArn sets the CertificateArn field's value. +func (s *VgwTelemetry) SetCertificateArn(v string) *VgwTelemetry { + s.CertificateArn = &v + return s +} + // SetLastStatusChange sets the LastStatusChange field's value. func (s *VgwTelemetry) SetLastStatusChange(v time.Time) *VgwTelemetry { s.LastStatusChange = &v @@ -94333,6 +95442,9 @@ const ( // InstanceTypeI3en24xlarge is a InstanceType enum value InstanceTypeI3en24xlarge = "i3en.24xlarge" + // InstanceTypeI3enMetal is a InstanceType enum value + InstanceTypeI3enMetal = "i3en.metal" + // InstanceTypeHi14xlarge is a InstanceType enum value InstanceTypeHi14xlarge = "hi1.4xlarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 7b42719d65335..7c9fccd7c8f52 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -38,8 +38,8 @@ func customRetryRule(r *request.Request) time.Duration { count = len(retryTimes) - 1 } - minTime := int(retryTimes[count]) - return time.Duration(sdkrand.SeededRand.Intn(minTime) + minTime) + minTime := int64(retryTimes[count]) + return time.Duration(sdkrand.SeededRand.Int63n(minTime) + minTime) } func setCustomRetryer(c *client.Client) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go index e403b84a44287..31c314e0e5f52 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/doc.go @@ -9,7 +9,7 @@ // // To learn more, see the following resources: // -// * Amazon EC2: Amazon EC2 product page (http://aws.amazon.com/ec2), Amazon +// * Amazon EC2: AmazonEC2 product page (http://aws.amazon.com/ec2), Amazon // EC2 documentation (http://aws.amazon.com/documentation/ec2) // // * Amazon EBS: Amazon EBS product page (http://aws.amazon.com/ebs), Amazon diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000000000..d5307fcaa0f3a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + 
+func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/cortexproject/cortex/COPYING.LGPL-3 b/vendor/github.com/cortexproject/cortex/COPYING.LGPL-3 deleted file mode 100644 index 89c7d69ec7c80..0000000000000 --- a/vendor/github.com/cortexproject/cortex/COPYING.LGPL-3 +++ /dev/null @@ -1,175 +0,0 @@ -./tools/integration/assert.sh is a copy of - - https://github.com/lehmannro/assert.sh/blob/master/assert.sh - -Since it was imported from its original source, it has only received -cosmetic modifications. As it is licensed under the LGPL-3, here's the -license text in its entirety: - - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. 
- - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. 
- - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 4810bf3cf60d2..a81ef7323bca7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -28,7 +28,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/spanlogger" awscommon "github.com/weaveworks/common/aws" "github.com/weaveworks/common/instrument" - "github.com/weaveworks/common/user" ) const ( @@ -127,6 +126,7 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) { type StorageConfig struct { DynamoDBConfig S3 flagext.URLValue + BucketNames string S3ForcePathStyle bool } @@ -137,6 +137,7 @@ func (cfg *StorageConfig) RegisterFlags(f *flag.FlagSet) { f.Var(&cfg.S3, "s3.url", "S3 endpoint URL with escaped Key and Secret encoded. "+ "If only region is specified as a host, proper endpoint will be deduced. Use inmemory:/// to use a mock in-memory implementation.") f.BoolVar(&cfg.S3ForcePathStyle, "s3.force-path-style", false, "Set this to `true` to force the request to use path-style addressing.") + f.StringVar(&cfg.BucketNames, "s3.buckets", "", "Comma separated list of bucket names to evenly distribute chunks over. 
Overrides any buckets specified in s3.url flag") } type dynamoDBStorageClient struct { @@ -193,7 +194,6 @@ func (a dynamoDBStorageClient) NewWriteBatch() chunk.WriteBatch { } func logWriteRetry(ctx context.Context, unprocessed dynamoDBWriteBatch) { - userID, _ := user.ExtractOrgID(ctx) for table, reqs := range unprocessed { dynamoThrottled.WithLabelValues("DynamoDB.BatchWriteItem", table).Add(float64(len(reqs))) for _, req := range reqs { @@ -207,7 +207,7 @@ func logWriteRetry(ctx context.Context, unprocessed dynamoDBWriteBatch) { if rangeAttr, ok := item[rangeKey]; ok { rnge = string(rangeAttr.B) } - util.Event().Log("msg", "store retry", "table", table, "userID", userID, "hashKey", hash, "rangeKey", rnge) + util.Event().Log("msg", "store retry", "table", table, "hashKey", hash, "rangeKey", rnge) } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index dfb74a42584f5..7b05128e4a18a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "hash/fnv" "io/ioutil" "strings" @@ -33,8 +34,8 @@ func init() { } type s3ObjectClient struct { - bucketName string - S3 s3iface.S3API + bucketNames []string + S3 s3iface.S3API } // NewS3ObjectClient makes a new S3-backed ObjectClient. @@ -51,10 +52,13 @@ func NewS3ObjectClient(cfg StorageConfig, schemaCfg chunk.SchemaConfig) (chunk.O s3Config = s3Config.WithMaxRetries(0) // We do our own retries, so we can monitor them s3Client := s3.New(session.New(s3Config)) - bucketName := strings.TrimPrefix(cfg.S3.URL.Path, "/") + bucketNames := []string{strings.TrimPrefix(cfg.S3.URL.Path, "/")} + if cfg.BucketNames != "" { + bucketNames = strings.Split(cfg.BucketNames, ",") // comma separated list of bucket names + } client := s3ObjectClient{ - S3: s3Client, - bucketName: bucketName, + S3: s3Client, + bucketNames: bucketNames, } return client, nil } @@ -68,11 +72,16 @@ func (a s3ObjectClient) GetChunks(ctx context.Context, chunks []chunk.Chunk) ([] func (a s3ObjectClient) getChunk(ctx context.Context, decodeContext *chunk.DecodeContext, c chunk.Chunk) (chunk.Chunk, error) { var resp *s3.GetObjectOutput + + // Map the key into a bucket + key := c.ExternalKey() + bucket := a.bucketFromKey(key) + err := instrument.CollectedRequest(ctx, "S3.GetObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { var err error resp, err = a.S3.GetObjectWithContext(ctx, &s3.GetObjectInput{ - Bucket: aws.String(a.bucketName), - Key: aws.String(c.ExternalKey()), + Bucket: aws.String(bucket), + Key: aws.String(key), }) return err }) @@ -128,9 +137,22 @@ func (a s3ObjectClient) putS3Chunk(ctx context.Context, key string, buf []byte) return instrument.CollectedRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { _, err := a.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{ Body: bytes.NewReader(buf), - Bucket: aws.String(a.bucketName), + Bucket: aws.String(a.bucketFromKey(key)), Key: aws.String(key), }) return err }) } + +// bucketFromKey maps a key to a bucket name +func (a s3ObjectClient) bucketFromKey(key string) string { + if len(a.bucketNames) == 0 { + return "" + } + + hasher := fnv.New32a() + hasher.Write([]byte(key)) + hash := hasher.Sum32() + + return a.bucketNames[hash%uint32(len(a.bucketNames))] +} diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index bb649529a979a..540b03de4539f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -2,6 +2,7 @@ package cache import ( "context" + "errors" "flag" "time" ) @@ -28,6 +29,7 @@ type Config struct { Background BackgroundConfig `yaml:"background,omitempty"` Memcache MemcachedConfig `yaml:"memcached,omitempty"` MemcacheClient MemcachedClientConfig `yaml:"memcached_client,omitempty"` + Redis RedisConfig `yaml:"redis,omitempty"` Fifocache FifoCacheConfig `yaml:"fifocache,omitempty"` // This is to name the cache metrics properly. @@ -42,6 +44,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, description string, f cfg.Background.RegisterFlagsWithPrefix(prefix, description, f) cfg.Memcache.RegisterFlagsWithPrefix(prefix, description, f) cfg.MemcacheClient.RegisterFlagsWithPrefix(prefix, description, f) + cfg.Redis.RegisterFlagsWithPrefix(prefix, description, f) cfg.Fifocache.RegisterFlagsWithPrefix(prefix, description, f) f.BoolVar(&cfg.EnableFifoCache, prefix+"cache.enable-fifocache", false, description+"Enable in-memory cache.") @@ -67,6 +70,10 @@ func New(cfg Config) (Cache, error) { caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache)) } + if cfg.MemcacheClient.Host != "" && cfg.Redis.Endpoint != "" { + return nil, errors.New("use of multiple cache storage systems is not supported") + } + if cfg.MemcacheClient.Host != "" { if cfg.Memcache.Expiration == 0 && cfg.DefaultValidity != 0 { cfg.Memcache.Expiration = cfg.DefaultValidity @@ -79,6 +86,15 @@ func New(cfg Config) (Cache, error) { caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache))) } + if cfg.Redis.Endpoint != "" { + if cfg.Redis.Expiration == 0 && cfg.DefaultValidity != 0 { + cfg.Redis.Expiration = cfg.DefaultValidity + } + cacheName := cfg.Prefix + "redis" + cache := NewRedisCache(cfg.Redis, cacheName, nil) + caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache))) + } + cache := NewTiered(caches) if len(caches) > 1 { cache = Instrument(cfg.Prefix+"tiered", cache) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go index d8ece7594dbf7..73ece246cf16b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go @@ -93,19 +93,19 @@ func (c *memcachedClient) Stop() { c.wait.Wait() } -func (c *memcachedClient) updateLoop(updateInterval time.Duration) error { +func (c *memcachedClient) updateLoop(updateInterval time.Duration) { defer c.wait.Done() ticker := time.NewTicker(updateInterval) - var err error for { select { case <-ticker.C: - err = c.updateMemcacheServers() + err := c.updateMemcacheServers() if err != nil { level.Warn(util.Logger).Log("msg", "error updating memcache servers", "err", err) } case <-c.quit: ticker.Stop() + return } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go new file mode 100644 index 0000000000000..878033b89a944 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -0,0 +1,154 @@ +package cache + +import ( + 
"context" + "flag" + "time" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/go-kit/kit/log/level" + "github.com/gomodule/redigo/redis" +) + +// RedisCache type caches chunks in redis +type RedisCache struct { + name string + expiration int + timeout time.Duration + pool *redis.Pool +} + +// RedisConfig defines how a RedisCache should be constructed. +type RedisConfig struct { + Endpoint string `yaml:"endpoint,omitempty"` + Timeout time.Duration `yaml:"timeout,omitempty"` + Expiration time.Duration `yaml:"expiration,omitempty"` + MaxIdleConns int `yaml:"max_idle_conns,omitempty"` + MaxActiveConns int `yaml:"max_active_conns,omitempty"` +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { + f.StringVar(&cfg.Endpoint, prefix+"redis.endpoint", "", description+"Redis service endpoint to use when caching chunks. If empty, no redis will be used.") + f.DurationVar(&cfg.Timeout, prefix+"redis.timeout", 100*time.Millisecond, description+"Maximum time to wait before giving up on redis requests.") + f.DurationVar(&cfg.Expiration, prefix+"redis.expiration", 0, description+"How long keys stay in the redis.") + f.IntVar(&cfg.MaxIdleConns, prefix+"redis.max-idle-conns", 80, description+"Maximum number of idle connections in pool.") + f.IntVar(&cfg.MaxActiveConns, prefix+"redis.max-active-conns", 0, description+"Maximum number of active connections in pool.") +} + +// NewRedisCache creates a new RedisCache +func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache { + // pool != nil only in unit tests + if pool == nil { + pool = &redis.Pool{ + MaxIdle: cfg.MaxIdleConns, + MaxActive: cfg.MaxActiveConns, + Dial: func() (redis.Conn, error) { + c, err := redis.Dial("tcp", cfg.Endpoint) + if err != nil { + return nil, err + } + return c, err + }, + } + } + + cache := &RedisCache{ + expiration: int(cfg.Expiration.Seconds()), + timeout: cfg.Timeout, + name: name, + pool: pool, + } + + if err := cache.ping(context.Background()); err != nil { + level.Error(util.Logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err) + } + + return cache +} + +// Fetch gets keys from the cache. The keys that are found must be in the order of the keys requested. +func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { + data, err := c.mget(ctx, keys) + + if err != nil { + level.Error(util.Logger).Log("msg", "failed to get from redis", "name", c.name, "err", err) + missed = make([]string, len(keys)) + copy(missed, keys) + return + } + for i, key := range keys { + if data[i] != nil { + found = append(found, key) + bufs = append(bufs, data[i]) + } else { + missed = append(missed, key) + } + } + return +} + +// Store stores the key in the cache. +func (c *RedisCache) Store(ctx context.Context, keys []string, bufs [][]byte) { + err := c.mset(ctx, keys, bufs, c.expiration) + if err != nil { + level.Error(util.Logger).Log("msg", "failed to put to redis", "name", c.name, "err", err) + } +} + +// Stop stops the redis client. +func (c *RedisCache) Stop() error { + return c.pool.Close() +} + +// mset adds key-value pairs to the cache. 
+func (c *RedisCache) mset(ctx context.Context, keys []string, bufs [][]byte, ttl int) error { + conn := c.pool.Get() + defer conn.Close() + + if err := conn.Send("MULTI"); err != nil { + return err + } + for i := range keys { + if err := conn.Send("SETEX", keys[i], ttl, bufs[i]); err != nil { + return err + } + } + _, err := redis.DoWithTimeout(conn, c.timeout, "EXEC") + return err +} + +// mget retrieves values from the cache. +func (c *RedisCache) mget(ctx context.Context, keys []string) ([][]byte, error) { + intf := make([]interface{}, len(keys)) + for i, key := range keys { + intf[i] = key + } + + conn := c.pool.Get() + defer conn.Close() + + return redis.ByteSlices(redis.DoWithTimeout(conn, c.timeout, "MGET", intf...)) +} + +func (c *RedisCache) ping(ctx context.Context) error { + conn := c.pool.Get() + defer conn.Close() + + pong, err := redis.DoWithTimeout(conn, c.timeout, "PING") + if err == nil { + _, err = redis.String(pong, err) + } + return err +} + +func redisStatusCode(err error) string { + switch err { + case nil: + return "200" + case redis.ErrNil: + return "404" + default: + return "500" + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go index c69e1be2a18db..299f85a646654 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk.go @@ -336,7 +336,7 @@ func equalByKey(a, b Chunk) bool { // Samples returns all SamplePairs for the chunk. func (c *Chunk) Samples(from, through model.Time) ([]model.SamplePair, error) { - it := c.Data.NewIterator() + it := c.Data.NewIterator(nil) interval := metric.Interval{OldestInclusive: from, NewestInclusive: through} return prom_chunk.RangeValues(it, interval) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 43f3bd14728ab..7c8ba806c69b7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -24,7 +24,6 @@ import ( "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" ) var ( @@ -161,13 +160,13 @@ func (c *store) calculateIndexEntries(userID string, from, through model.Time, c } // Get implements Store -func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { +func (c *store) Get(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.Get") defer log.Span.Finish() level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers)) // Validate the query is within reasonable bounds. - metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers) + metricName, matchers, shortcut, err := c.validateQuery(ctx, userID, &from, &through, allMatchers) if err != nil { return nil, err } else if shortcut { @@ -175,25 +174,20 @@ func (c *store) Get(ctx context.Context, from, through model.Time, allMatchers . 
} log.Span.SetTag("metric", metricName) - return c.getMetricNameChunks(ctx, from, through, matchers, metricName) + return c.getMetricNameChunks(ctx, userID, from, through, matchers, metricName) } -func (c *store) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { +func (c *store) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { return nil, nil, errors.New("not implemented") } // LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c *store) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName, labelName string) ([]string, error) { +func (c *store) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName, labelName string) ([]string, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.LabelValues") defer log.Span.Finish() level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "labelName", labelName) - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - - shortcut, err := c.validateQueryTimeRange(ctx, &from, &through) + shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through) if err != nil { return nil, err } else if shortcut { @@ -224,19 +218,19 @@ func (c *store) LabelValuesForMetricName(ctx context.Context, from, through mode } // LabelNamesForMetricName retrieves all label names for a metric name. -func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { +func (c *store) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.LabelNamesForMetricName") defer log.Span.Finish() level.Debug(log).Log("from", from, "through", through, "metricName", metricName) - shortcut, err := c.validateQueryTimeRange(ctx, &from, &through) + shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through) if err != nil { return nil, err } else if shortcut { return nil, nil } - chunks, err := c.lookupChunksByMetricName(ctx, from, through, nil, metricName) + chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, nil, metricName) if err != nil { return nil, err } @@ -256,7 +250,7 @@ func (c *store) LabelNamesForMetricName(ctx context.Context, from, through model return labelNamesFromChunks(allChunks), nil } -func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, through *model.Time) (bool, error) { +func (c *store) validateQueryTimeRange(ctx context.Context, userID string, from *model.Time, through *model.Time) (bool, error) { log, ctx := spanlogger.New(ctx, "store.validateQueryTimeRange") defer log.Span.Finish() @@ -264,11 +258,6 @@ func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, th return false, httpgrpc.Errorf(http.StatusBadRequest, "invalid query, through < from (%s < %s)", through, from) } - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return false, err - } - maxQueryLength := c.limits.MaxQueryLength(userID) if maxQueryLength > 0 && (*through).Sub(*from) > maxQueryLength { return false, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, (*through).Sub(*from), maxQueryLength) @@ -303,11 +292,11 @@ func (c *store) validateQueryTimeRange(ctx context.Context, from *model.Time, th 
return false, nil } -func (c *store) validateQuery(ctx context.Context, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { +func (c *store) validateQuery(ctx context.Context, userID string, from *model.Time, through *model.Time, matchers []*labels.Matcher) (string, []*labels.Matcher, bool, error) { log, ctx := spanlogger.New(ctx, "store.validateQuery") defer log.Span.Finish() - shortcut, err := c.validateQueryTimeRange(ctx, from, through) + shortcut, err := c.validateQueryTimeRange(ctx, userID, from, through) if err != nil { return "", nil, false, err } @@ -324,18 +313,13 @@ func (c *store) validateQuery(ctx context.Context, from *model.Time, through *mo return metricNameMatcher.Value, matchers, false, nil } -func (c *store) getMetricNameChunks(ctx context.Context, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) { +func (c *store) getMetricNameChunks(ctx context.Context, userID string, from, through model.Time, allMatchers []*labels.Matcher, metricName string) ([]Chunk, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.getMetricNameChunks") defer log.Finish() level.Debug(log).Log("from", from, "through", through, "metricName", metricName, "matchers", len(allMatchers)) - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - filters, matchers := util.SplitFiltersAndMatchers(allMatchers) - chunks, err := c.lookupChunksByMetricName(ctx, from, through, matchers, metricName) + chunks, err := c.lookupChunksByMetricName(ctx, userID, from, through, matchers, metricName) if err != nil { return nil, err } @@ -364,15 +348,10 @@ func (c *store) getMetricNameChunks(ctx context.Context, from, through model.Tim return filteredChunks, nil } -func (c *store) lookupChunksByMetricName(ctx context.Context, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) { +func (c *store) lookupChunksByMetricName(ctx context.Context, userID string, from, through model.Time, matchers []*labels.Matcher, metricName string) ([]Chunk, error) { log, ctx := spanlogger.New(ctx, "ChunkStore.lookupChunksByMetricName") defer log.Finish() - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - // Just get chunks for metric if there are no matchers if len(matchers) == 0 { queries, err := c.schema.GetReadQueriesForMetric(from, through, userID, metricName) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index 3d0f9b568a14a..f22aad0705d54 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -19,12 +19,12 @@ type StoreLimits interface { type Store interface { Put(ctx context.Context, chunks []Chunk) error PutOne(ctx context.Context, from, through model.Time, chunk Chunk) error - Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) + Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) // GetChunkRefs returns the un-loaded chunks and the fetchers to be used to load them. You can load each slice of chunks ([]Chunk), // using the corresponding Fetcher (fetchers[i].FetchChunks(ctx, chunks[i], ...) 
- GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) - LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) - LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) + GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) + LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) + LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) Stop() } @@ -85,10 +85,10 @@ func (c compositeStore) PutOne(ctx context.Context, from, through model.Time, ch }) } -func (c compositeStore) Get(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { +func (c compositeStore) Get(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([]Chunk, error) { var results []Chunk err := c.forStores(from, through, func(from, through model.Time, store Store) error { - chunks, err := store.Get(ctx, from, through, matchers...) + chunks, err := store.Get(ctx, userID, from, through, matchers...) if err != nil { return err } @@ -99,10 +99,10 @@ func (c compositeStore) Get(ctx context.Context, from, through model.Time, match } // LabelValuesForMetricName retrieves all label values for a single label name and metric name. -func (c compositeStore) LabelValuesForMetricName(ctx context.Context, from, through model.Time, metricName string, labelName string) ([]string, error) { +func (c compositeStore) LabelValuesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string, labelName string) ([]string, error) { var result []string err := c.forStores(from, through, func(from, through model.Time, store Store) error { - labelValues, err := store.LabelValuesForMetricName(ctx, from, through, metricName, labelName) + labelValues, err := store.LabelValuesForMetricName(ctx, userID, from, through, metricName, labelName) if err != nil { return err } @@ -113,10 +113,10 @@ func (c compositeStore) LabelValuesForMetricName(ctx context.Context, from, thro } // LabelNamesForMetricName retrieves all label names for a metric name. 
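With the Store interface now taking the tenant explicitly, extraction of the org ID moves out of the vendored store methods and into the caller. A minimal caller-side sketch (function and variable names are illustrative, not part of this change), assuming the weaveworks user package is still used at the boundary:

package example

import (
    "context"

    "github.com/cortexproject/cortex/pkg/chunk"
    "github.com/prometheus/common/model"
    "github.com/prometheus/prometheus/pkg/labels"
    "github.com/weaveworks/common/user"
)

// querySample is an illustrative caller: the org ID is resolved once at the
// API boundary and passed down explicitly, instead of every store method
// re-extracting it from the request context.
func querySample(ctx context.Context, store chunk.Store, from, through model.Time, matchers ...*labels.Matcher) ([]chunk.Chunk, error) {
    userID, err := user.ExtractOrgID(ctx)
    if err != nil {
        return nil, err
    }
    return store.Get(ctx, userID, from, through, matchers...)
}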
-func (c compositeStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { +func (c compositeStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { var result []string err := c.forStores(from, through, func(from, through model.Time, store Store) error { - labelNames, err := store.LabelNamesForMetricName(ctx, from, through, metricName) + labelNames, err := store.LabelNamesForMetricName(ctx, userID, from, through, metricName) if err != nil { return err } @@ -126,11 +126,11 @@ func (c compositeStore) LabelNamesForMetricName(ctx context.Context, from, throu return result, err } -func (c compositeStore) GetChunkRefs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { +func (c compositeStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { chunkIDs := [][]Chunk{} fetchers := []*Fetcher{} err := c.forStores(from, through, func(from, through model.Time, store Store) error { - ids, fetcher, err := store.GetChunkRefs(ctx, from, through, matchers...) + ids, fetcher, err := store.GetChunkRefs(ctx, userID, from, through, matchers...) if err != nil { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go index b028d6e8ab031..66dff5b1cdee8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/bigchunk.go @@ -7,7 +7,7 @@ import ( "io" "github.com/prometheus/common/model" - "github.com/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunkenc" ) const samplesPerChunk = 120 @@ -15,9 +15,8 @@ const samplesPerChunk = 120 var errOutOfBounds = errors.New("out of bounds") type smallChunk struct { - *chunkenc.XORChunk + chunkenc.XORChunk start int64 - end int64 } // bigchunk is a set of prometheus/tsdb chunks. 
It grows over time and has no @@ -45,7 +44,6 @@ func (b *bigchunk) Add(sample model.SamplePair) ([]Chunk, error) { b.appender.Append(int64(sample.Timestamp), float64(sample.Value)) b.remainingSamples-- - b.chunks[len(b.chunks)-1].end = int64(sample.Timestamp) return []Chunk{b}, nil } @@ -63,22 +61,25 @@ func (b *bigchunk) addNextChunk(start model.Time) error { if err != nil { return err } - b.chunks[l-1].XORChunk = compacted.(*chunkenc.XORChunk) + b.chunks[l-1].XORChunk = *compacted.(*chunkenc.XORChunk) } } - chunk := chunkenc.NewXORChunk() - appender, err := chunk.Appender() - if err != nil { - return err + // Explicitly reallocate slice to avoid up to 2x overhead if we let append() do it + if len(b.chunks)+1 > cap(b.chunks) { + newChunks := make([]smallChunk, len(b.chunks), len(b.chunks)+1) + copy(newChunks, b.chunks) + b.chunks = newChunks } - b.chunks = append(b.chunks, smallChunk{ - XORChunk: chunk, + XORChunk: *chunkenc.NewXORChunk(), start: int64(start), - end: int64(start), }) + appender, err := b.chunks[len(b.chunks)-1].Appender() + if err != nil { + return err + } b.appender = appender b.remainingSamples = samplesPerChunk return nil @@ -114,6 +115,7 @@ func (b *bigchunk) UnmarshalFromBuf(buf []byte) error { } b.chunks = make([]smallChunk, 0, numChunks+1) // allow one extra space in case we want to add new data + var reuseIter chunkenc.Iterator for i := uint16(0); i < numChunks; i++ { chunkLen, err := r.ReadUint16() if err != nil { @@ -130,15 +132,15 @@ func (b *bigchunk) UnmarshalFromBuf(buf []byte) error { return err } - start, end, err := firstAndLastTimes(chunk) + var start int64 + start, reuseIter, err = firstTime(chunk, reuseIter) if err != nil { return err } b.chunks = append(b.chunks, smallChunk{ - XORChunk: chunk.(*chunkenc.XORChunk), + XORChunk: *chunk.(*chunkenc.XORChunk), start: int64(start), - end: int64(end), }) } return nil @@ -169,10 +171,20 @@ func (b *bigchunk) Size() int { return sum } -func (b *bigchunk) NewIterator() Iterator { +func (b *bigchunk) NewIterator(reuseIter Iterator) Iterator { + if bci, ok := reuseIter.(*bigchunkIterator); ok { + bci.bigchunk = b + bci.i = 0 + if len(b.chunks) > 0 { + bci.curr = b.chunks[0].Iterator(bci.curr) + } else { + bci.curr = chunkenc.NewNopIterator() + } + return bci + } var it chunkenc.Iterator if len(b.chunks) > 0 { - it = b.chunks[0].Iterator() + it = b.chunks[0].Iterator(it) } else { it = chunkenc.NewNopIterator() } @@ -185,8 +197,8 @@ func (b *bigchunk) NewIterator() Iterator { func (b *bigchunk) Slice(start, end model.Time) Chunk { i, j := 0, len(b.chunks) for k := 0; k < len(b.chunks); k++ { - if b.chunks[k].end < int64(start) { - i = k + 1 + if b.chunks[k].start <= int64(start) { + i = k } if b.chunks[k].start > int64(end) { j = k @@ -246,20 +258,17 @@ func (it *bigchunkIterator) FindAtOrAfter(target model.Time) bool { // If the seek is outside the current chunk, use the index to find the right // chunk. 
- if int64(target) < it.chunks[it.i].start || int64(target) > it.chunks[it.i].end { + if int64(target) < it.chunks[it.i].start || + (it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start) { it.curr = nil - for it.i = 0; it.i < len(it.chunks) && int64(target) > it.chunks[it.i].end; it.i++ { + for it.i = 0; it.i+1 < len(it.chunks) && int64(target) >= it.chunks[it.i+1].start; it.i++ { } } - if it.i >= len(it.chunks) { - return false - } - if it.curr == nil { - it.curr = it.chunks[it.i].Iterator() + it.curr = it.chunks[it.i].Iterator(it.curr) } else if t, _ := it.curr.At(); int64(target) <= t { - it.curr = it.chunks[it.i].Iterator() + it.curr = it.chunks[it.i].Iterator(it.curr) } for it.curr.Next() { @@ -268,6 +277,14 @@ func (it *bigchunkIterator) FindAtOrAfter(target model.Time) bool { return true } } + // Timestamp is after the end of that chunk - if there is another chunk + // then the position we need is at the beginning of it. + if it.i+1 < len(it.chunks) { + it.i++ + it.curr = it.chunks[it.i].Iterator(it.curr) + it.curr.Next() + return true + } return false } @@ -281,7 +298,7 @@ func (it *bigchunkIterator) Scan() bool { for it.i < len(it.chunks)-1 { it.i++ - it.curr = it.chunks[it.i].Iterator() + it.curr = it.chunks[it.i].Iterator(it.curr) if it.curr.Next() { return true } @@ -321,20 +338,11 @@ func (it *bigchunkIterator) Err() error { return nil } -func firstAndLastTimes(c chunkenc.Chunk) (int64, int64, error) { - var ( - first int64 - last int64 - firstSet bool - iter = c.Iterator() - ) - for iter.Next() { - t, _ := iter.At() - if !firstSet { - first = t - firstSet = true - } - last = t +func firstTime(c chunkenc.Chunk, iter chunkenc.Iterator) (int64, chunkenc.Iterator, error) { + var first int64 + iter = c.Iterator(iter) + if iter.Next() { + first, _ = iter.At() } - return first, last, iter.Err() + return first, iter, iter.Err() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go index 3a0116a0b2a98..f36e4d3597f20 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/chunk.go @@ -44,7 +44,10 @@ type Chunk interface { // or a newly allocated version. In any case, take the returned chunk as // the relevant one and discard the original chunk. Add(sample model.SamplePair) ([]Chunk, error) - NewIterator() Iterator + // NewIterator returns an iterator for the chunks. + // The iterator passed as argument is for re-use. Depending on implementation, + // the iterator can be re-used or a new iterator can be allocated. + NewIterator(Iterator) Iterator Marshal(io.Writer) error UnmarshalFromBuf([]byte) error Encoding() Encoding @@ -141,7 +144,7 @@ func transcodeAndAdd(dst Chunk, src Chunk, s model.SamplePair) ([]Chunk, error) err error ) - it := src.NewIterator() + it := src.NewIterator(nil) for it.Scan() { if NewChunks, err = head.Add(it.Value()); err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/delta.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/delta.go index 76ad6914c650a..120f734c363e3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/delta.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/delta.go @@ -185,7 +185,7 @@ func (c *deltaEncodedChunk) Slice(_, _ model.Time) Chunk { } // NewIterator implements chunk. 
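NewIterator now accepts an iterator to recycle, which is what lets bigchunk avoid an allocation per sub-chunk. A small sketch of the intended calling pattern, with an illustrative helper name:

package example

import "github.com/cortexproject/cortex/pkg/chunk/encoding"

// countSamples walks a set of chunks, re-using a single iterator across them.
// Passing the previous iterator back into NewIterator lets implementations
// such as bigchunk recycle it; passing nil simply allocates a fresh one.
func countSamples(chunks []encoding.Chunk) int {
    var it encoding.Iterator
    n := 0
    for _, c := range chunks {
        it = c.NewIterator(it)
        for it.Scan() {
            n++
        }
    }
    return n
}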
-func (c *deltaEncodedChunk) NewIterator() Iterator { +func (c *deltaEncodedChunk) NewIterator(_ Iterator) Iterator { return newIndexAccessingChunkIterator(c.Len(), &deltaEncodedIndexAccessor{ c: *c, baseT: c.baseTime(), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go index ac5470abca35c..b9ca6f0402b8c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/doubledelta.go @@ -192,8 +192,8 @@ func (c doubleDeltaEncodedChunk) FirstTime() model.Time { return c.baseTime() } -// NewIterator( implements chunk. -func (c *doubleDeltaEncodedChunk) NewIterator() Iterator { +// NewIterator implements chunk. +func (c *doubleDeltaEncodedChunk) NewIterator(_ Iterator) Iterator { return newIndexAccessingChunkIterator(c.Len(), &doubleDeltaEncodedIndexAccessor{ c: *c, baseT: c.baseTime(), diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go index 3d4b6f12b58ac..83ab9f2602d57 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/factory.go @@ -28,6 +28,9 @@ func (Config) RegisterFlags(f *flag.FlagSet) { // String implements flag.Value. func (e Encoding) String() string { + if known, found := encodings[e]; found { + return known.Name + } return fmt.Sprintf("%d", e) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go index 2005053aa78a4..c2663fe9e2473 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/encoding/varbit.go @@ -276,7 +276,7 @@ func (c *varbitChunk) Add(s model.SamplePair) ([]Chunk, error) { } // NewIterator implements chunk. -func (c varbitChunk) NewIterator() Iterator { +func (c varbitChunk) NewIterator(_ Iterator) Iterator { return newVarbitChunkIterator(c) } @@ -329,7 +329,7 @@ func (c varbitChunk) marshalLen() int { // Len implements chunk. Runs in O(n). func (c varbitChunk) Len() int { - it := c.NewIterator() + it := c.NewIterator(nil) i := 0 for ; it.Scan(); i++ { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index cd29bf4e34582..56251a19f328f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -266,9 +266,7 @@ func (m *MockStorage) query(ctx context.Context, query IndexQuery, callback func } result := mockReadBatch{} - for _, item := range items { - result.items = append(result.items, item) - } + result.items = append(result.items, items...) 
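A side effect of the factory.go change is that an Encoding now prints its registered name rather than a bare number, for example in flag defaults and logs. A tiny illustration, assuming the package's Bigchunk constant is one of the registered encodings:

package main

import (
    "fmt"

    "github.com/cortexproject/cortex/pkg/chunk/encoding"
)

func main() {
    // With the new String(), a known encoding prints its registered name;
    // unknown values still fall back to the numeric form.
    fmt.Println(encoding.Bigchunk.String())
}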
callback(&result) return nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go index c20e74968efb6..a29b00c73d53c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_caching.go @@ -14,80 +14,61 @@ type schemaCaching struct { } func (s *schemaCaching) GetReadQueriesForMetric(from, through model.Time, userID string, metricName string) ([]IndexQuery, error) { - cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) - - cacheableQueries, err := s.Schema.GetReadQueriesForMetric(cFrom, cThrough, userID, metricName) - if err != nil { - return nil, err - } - - activeQueries, err := s.Schema.GetReadQueriesForMetric(from, through, userID, metricName) - if err != nil { - return nil, err - } - - return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil + return s.splitTimesByCacheability(from, through, func(from, through model.Time) ([]IndexQuery, error) { + return s.Schema.GetReadQueriesForMetric(from, through, userID, metricName) + }) } func (s *schemaCaching) GetReadQueriesForMetricLabel(from, through model.Time, userID string, metricName string, labelName string) ([]IndexQuery, error) { - cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) - - cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabel(cFrom, cThrough, userID, metricName, labelName) - if err != nil { - return nil, err - } - - activeQueries, err := s.Schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) - if err != nil { - return nil, err - } - - return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil + return s.splitTimesByCacheability(from, through, func(from, through model.Time) ([]IndexQuery, error) { + return s.Schema.GetReadQueriesForMetricLabel(from, through, userID, metricName, labelName) + }) } func (s *schemaCaching) GetReadQueriesForMetricLabelValue(from, through model.Time, userID string, metricName string, labelName string, labelValue string) ([]IndexQuery, error) { - cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) - - cacheableQueries, err := s.Schema.GetReadQueriesForMetricLabelValue(cFrom, cThrough, userID, metricName, labelName, labelValue) - if err != nil { - return nil, err - } - - activeQueries, err := s.Schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) - if err != nil { - return nil, err - } - - return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil + return s.splitTimesByCacheability(from, through, func(from, through model.Time) ([]IndexQuery, error) { + return s.Schema.GetReadQueriesForMetricLabelValue(from, through, userID, metricName, labelName, labelValue) + }) } // If the query resulted in series IDs, use this method to find chunks. 
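The four near-identical cacheable/active splits above collapse into a single closure-taking helper. The sketch below restates the split rule in isolation with simplified types; it is an illustration of the behaviour, not the vendored helper itself:

package example

import "github.com/prometheus/common/model"

// splitByCacheability illustrates the time-range split used by the new
// schemaCaching helper: a range entirely after cacheBefore is "active",
// a range entirely before it is "cacheable", and a straddling range is cut
// in two at cacheBefore so each half can be queried (and cached) separately.
func splitByCacheability(from, through, cacheBefore model.Time) (cacheable, active [][2]model.Time) {
    switch {
    case from.After(cacheBefore):
        active = append(active, [2]model.Time{from, through})
    case through.Before(cacheBefore):
        cacheable = append(cacheable, [2]model.Time{from, through})
    default:
        cacheable = append(cacheable, [2]model.Time{from, cacheBefore})
        active = append(active, [2]model.Time{cacheBefore, through})
    }
    return cacheable, active
}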
func (s *schemaCaching) GetChunksForSeries(from, through model.Time, userID string, seriesID []byte) ([]IndexQuery, error) { - cFrom, cThrough, from, through := splitTimesByCacheability(from, through, model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix())) - - cacheableQueries, err := s.Schema.GetChunksForSeries(cFrom, cThrough, userID, seriesID) - if err != nil { - return nil, err - } - - activeQueries, err := s.Schema.GetChunksForSeries(from, through, userID, seriesID) - if err != nil { - return nil, err - } - - return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil + return s.splitTimesByCacheability(from, through, func(from, through model.Time) ([]IndexQuery, error) { + return s.Schema.GetChunksForSeries(from, through, userID, seriesID) + }) } -func splitTimesByCacheability(from, through model.Time, cacheBefore model.Time) (model.Time, model.Time, model.Time, model.Time) { +func (s *schemaCaching) splitTimesByCacheability(from, through model.Time, f func(from, through model.Time) ([]IndexQuery, error)) ([]IndexQuery, error) { + var ( + cacheableQueries []IndexQuery + activeQueries []IndexQuery + err error + cacheBefore = model.TimeFromUnix(mtime.Now().Add(-s.cacheOlderThan).Unix()) + ) + if from.After(cacheBefore) { - return 0, 0, from, through - } + activeQueries, err = f(from, through) + if err != nil { + return nil, err + } + } else if through.Before(cacheBefore) { + cacheableQueries, err = f(from, through) + if err != nil { + return nil, err + } + } else { + cacheableQueries, err = f(from, cacheBefore) + if err != nil { + return nil, err + } - if through.Before(cacheBefore) { - return from, through, 0, 0 + activeQueries, err = f(cacheBefore, through) + if err != nil { + return nil, err + } } - return from, cacheBefore, cacheBefore, through + return mergeCacheableAndActiveQueries(cacheableQueries, activeQueries), nil } func mergeCacheableAndActiveQueries(cacheableQueries []IndexQuery, activeQueries []IndexQuery) []IndexQuery { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 1a7f10bd38468..061c4ba39f6c0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -259,11 +259,6 @@ func (cfg *PeriodConfig) hourlyBuckets(from, through model.Time, userID string) result = []Bucket{} ) - // If through ends on the hour, don't include the upcoming hour - if through.Unix()%secondsInHour == 0 { - throughHour-- - } - for i := fromHour; i <= throughHour; i++ { relativeFrom := util.Max64(0, int64(from)-(i*millisecondsInHour)) relativeThrough := util.Min64(millisecondsInHour, int64(through)-(i*millisecondsInHour)) @@ -284,11 +279,6 @@ func (cfg *PeriodConfig) dailyBuckets(from, through model.Time, userID string) [ result = []Bucket{} ) - // If through ends on 00:00 of the day, don't include the upcoming day - if through.Unix()%secondsInDay == 0 { - throughDay-- - } - for i := fromDay; i <= throughDay; i++ { // The idea here is that the hash key contains the bucket start time (rounded to // the nearest day). 
The range key can contain the offset from that, to the diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index a811300afa02e..1bfda046f162d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -11,7 +11,6 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/util" @@ -98,17 +97,12 @@ func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Ob } // Get implements Store -func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { +func (c *seriesStore) Get(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([]Chunk, error) { log, ctx := spanlogger.New(ctx, "SeriesStore.Get") defer log.Span.Finish() level.Debug(log).Log("from", from, "through", through, "matchers", len(allMatchers)) - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - - chks, fetchers, err := c.GetChunkRefs(ctx, from, through, allMatchers...) + chks, fetchers, err := c.GetChunkRefs(ctx, userID, from, through, allMatchers...) if err != nil { return nil, err } @@ -141,17 +135,12 @@ func (c *seriesStore) Get(ctx context.Context, from, through model.Time, allMatc return filteredChunks, nil } -func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { +func (c *seriesStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, allMatchers ...*labels.Matcher) ([][]Chunk, []*Fetcher, error) { log, ctx := spanlogger.New(ctx, "SeriesStore.GetChunkRefs") defer log.Span.Finish() - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, nil, err - } - // Validate the query is within reasonable bounds. - metricName, matchers, shortcut, err := c.validateQuery(ctx, &from, &through, allMatchers) + metricName, matchers, shortcut, err := c.validateQuery(ctx, userID, &from, &through, allMatchers) if err != nil { return nil, nil, err } else if shortcut { @@ -191,16 +180,11 @@ func (c *seriesStore) GetChunkRefs(ctx context.Context, from, through model.Time } // LabelNamesForMetricName retrieves all label names for a metric name. 
-func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, from, through model.Time, metricName string) ([]string, error) { +func (c *seriesStore) LabelNamesForMetricName(ctx context.Context, userID string, from, through model.Time, metricName string) ([]string, error) { log, ctx := spanlogger.New(ctx, "SeriesStore.LabelNamesForMetricName") defer log.Span.Finish() - userID, err := user.ExtractOrgID(ctx) - if err != nil { - return nil, err - } - - shortcut, err := c.validateQueryTimeRange(ctx, &from, &through) + shortcut, err := c.validateQueryTimeRange(ctx, userID, &from, &through) if err != nil { return nil, err } else if shortcut { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go index 563a32c93a644..f1863653b792a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go @@ -233,7 +233,6 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc } s.cache.Store(ctx, hashed, bufs) - return } func (s *cachingIndexClient) cacheFetch(ctx context.Context, keys []string) (batches []ReadBatch, missed []string) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index 27865b7c57912..ccdb146fad14e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -98,7 +98,7 @@ func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) { // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *ProvisionConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { - f.Int64Var(&cfg.ProvisionedWriteThroughput, argPrefix+".write-throughput", 3000, "DynamoDB table default write throughput.") + f.Int64Var(&cfg.ProvisionedWriteThroughput, argPrefix+".write-throughput", 1000, "DynamoDB table default write throughput.") f.Int64Var(&cfg.ProvisionedReadThroughput, argPrefix+".read-throughput", 300, "DynamoDB table default read throughput.") f.BoolVar(&cfg.ProvisionedThroughputOnDemandMode, argPrefix+".enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled") f.Int64Var(&cfg.InactiveWriteThroughput, argPrefix+".inactive-write-throughput", 1, "DynamoDB table write throughput for inactive tables.") @@ -152,7 +152,7 @@ func (m *TableManager) Start() { m.wait.Add(1) go m.loop() - if m.bucketClient != nil && m.cfg.RetentionPeriod != 0 && m.cfg.RetentionDeletesEnabled == true { + if m.bucketClient != nil && m.cfg.RetentionPeriod != 0 && m.cfg.RetentionDeletesEnabled { m.wait.Add(1) go m.bucketRetentionLoop() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go index dfd2531717c16..4174896e421b0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go @@ -18,26 +18,19 @@ import ( var json = jsoniter.ConfigCompatibleWithStandardLibrary -// ToWriteRequest converts an array of samples into a WriteRequest proto. 
-func ToWriteRequest(samples []model.Sample, source WriteRequest_SourceEnum) *WriteRequest { +// ToWriteRequest converts matched slices of Labels and Samples into a WriteRequest proto. +// It gets timeseries from the pool, so ReuseSlice() should be called when done. +func ToWriteRequest(lbls []labels.Labels, samples []Sample, source WriteRequest_SourceEnum) *WriteRequest { req := &WriteRequest{ - Timeseries: make([]PreallocTimeseries, 0, len(samples)), + Timeseries: slicePool.Get().([]PreallocTimeseries), Source: source, } - for _, s := range samples { - ts := PreallocTimeseries{ - TimeSeries: TimeSeries{ - Labels: FromMetricsToLabelAdapters(s.Metric), - Samples: []Sample{ - { - Value: float64(s.Value), - TimestampMs: int64(s.Timestamp), - }, - }, - }, - } - req.Timeseries = append(req.Timeseries, ts) + for i, s := range samples { + ts := timeSeriesPool.Get().(*TimeSeries) + ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...) + ts.Samples = append(ts.Samples, s) + req.Timeseries = append(req.Timeseries, PreallocTimeseries{TimeSeries: ts}) } return req @@ -201,10 +194,10 @@ func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels { return *(*labels.Labels)(unsafe.Pointer(&ls)) } -// FromLabelsToLabelAdapaters casts labels.Labels to []LabelAdapter. +// FromLabelsToLabelAdapters casts labels.Labels to []LabelAdapter. // It uses unsafe, but as LabelAdapter == labels.Label this should be safe. // This allows us to use labels.Labels directly in protos. -func FromLabelsToLabelAdapaters(ls labels.Labels) []LabelAdapter { +func FromLabelsToLabelAdapters(ls labels.Labels) []LabelAdapter { return *(*[]LabelAdapter)(unsafe.Pointer(&ls)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go index 8919ba99f3ab8..204382952b0ba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/timeseries.go @@ -24,7 +24,7 @@ var ( timeSeriesPool = sync.Pool{ New: func() interface{} { - return TimeSeries{ + return &TimeSeries{ Labels: make([]LabelAdapter, 0, expectedLabels), Samples: make([]Sample, 0, expectedSamplesPerSeries), } @@ -56,12 +56,12 @@ func (p *PreallocWriteRequest) Unmarshal(dAtA []byte) error { // PreallocTimeseries is a TimeSeries which preallocs slices on Unmarshal. type PreallocTimeseries struct { - TimeSeries + *TimeSeries } // Unmarshal implements proto.Message. func (p *PreallocTimeseries) Unmarshal(dAtA []byte) error { - p.TimeSeries = timeSeriesPool.Get().(TimeSeries) + p.TimeSeries = timeSeriesPool.Get().(*TimeSeries) return p.TimeSeries.Unmarshal(dAtA) } @@ -249,7 +249,7 @@ func ReuseSlice(slice []PreallocTimeseries) { } // ReuseTimeseries puts the timeseries back into a sync.Pool for reuse. -func ReuseTimeseries(ts TimeSeries) { +func ReuseTimeseries(ts *TimeSeries) { ts.Labels = ts.Labels[:0] ts.Samples = ts.Samples[:0] timeSeriesPool.Put(ts) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go index 2decdea48ceda..376199083c079 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go @@ -37,19 +37,26 @@ type itemTracker struct { // // Not implemented as a method on Ring so we can test separately. 
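ToWriteRequest now takes matched label and sample slices and draws timeseries from a pool, so the caller has to hand the slice back when done. A minimal sketch of the new calling convention (sample values are arbitrary; client.API is assumed to be one of the generated WriteRequest_SourceEnum values):

package example

import (
    "github.com/cortexproject/cortex/pkg/ingester/client"
    "github.com/prometheus/prometheus/pkg/labels"
)

func buildRequest() {
    lbls := []labels.Labels{labels.FromStrings("__name__", "up", "job", "demo")}
    samples := []client.Sample{{Value: 1, TimestampMs: 1570000000000}}

    // Timeseries inside the request come from a sync.Pool.
    req := client.ToWriteRequest(lbls, samples, client.API)

    // ... send req to an ingester ...

    // Return the pooled timeseries once the request is no longer needed.
    client.ReuseSlice(req.Timeseries)
}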
func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error, cleanup func()) error { - replicationSets, err := r.BatchGet(keys, Write) - if err != nil { - return err - } - + expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.IngesterCount() itemTrackers := make([]itemTracker, len(keys)) - ingesters := map[string]ingester{} - for i, replicationSet := range replicationSets { + ingesters := make(map[string]ingester, r.IngesterCount()) + + const maxExpectedReplicationSet = 5 // Typical replication factor 3, plus one for inactive plus one for luck. + var descs [maxExpectedReplicationSet]IngesterDesc + for i, key := range keys { + replicationSet, err := r.Get(key, Write, descs[:0]) + if err != nil { + return err + } itemTrackers[i].minSuccess = len(replicationSet.Ingesters) - replicationSet.MaxErrors itemTrackers[i].maxFailures = replicationSet.MaxErrors for _, desc := range replicationSet.Ingesters { - curr := ingesters[desc.Addr] + curr, found := ingesters[desc.Addr] + if !found { + curr.itemTrackers = make([]*itemTracker, 0, expectedTrackers) + curr.indexes = make([]int, 0, expectedTrackers) + } ingesters[desc.Addr] = ingester{ desc: desc, itemTrackers: append(curr.itemTrackers, &itemTrackers[i]), @@ -60,8 +67,8 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges tracker := batchTracker{ rpcsPending: int32(len(itemTrackers)), - done: make(chan struct{}), - err: make(chan error), + done: make(chan struct{}, 1), + err: make(chan error, 1), } var wg sync.WaitGroup diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index 43f49b4b09ad1..669ab94989b01 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -191,7 +191,7 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { return fmt.Errorf("waiting for %v after startup", i.cfg.MinReadyDuration) } - ringDesc, err := i.KVStore.Get(ctx, ConsulKey) + desc, err := i.KVStore.Get(ctx, ConsulKey) if err != nil { level.Error(util.Logger).Log("msg", "error talking to consul", "err", err) return fmt.Errorf("error talking to consul: %s", err) @@ -200,7 +200,13 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { if len(i.getTokens()) == 0 { return fmt.Errorf("this ingester owns no tokens") } - if err := ringDesc.(*Desc).Ready(i.cfg.RingConfig.HeartbeatTimeout); err != nil { + + ringDesc, ok := desc.(*Desc) + if !ok || ringDesc == nil { + return fmt.Errorf("no ring returned from consul") + } + + if err := ringDesc.Ready(i.cfg.RingConfig.HeartbeatTimeout); err != nil { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index a62b1683545a9..5d0728659fec1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -10,9 +10,8 @@ import ( // tolerate. // - Filters out dead ingesters so the one doesn't even try to write to them. // - Checks there is enough ingesters for an operation to succeed. -func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ( - liveIngesters []IngesterDesc, maxFailure int, err error, -) { +// The ingesters argument may be overwritten. 
+func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ([]IngesterDesc, int, error) { // We need a response from a quorum of ingesters, which is n/2 + 1. In the // case of a node joining/leaving, the actual replica set might be bigger // than the replication factor, so use the bigger or the two. @@ -21,29 +20,29 @@ func (r *Ring) replicationStrategy(ingesters []IngesterDesc, op Operation) ( replicationFactor = len(ingesters) } minSuccess := (replicationFactor / 2) + 1 - maxFailure = replicationFactor - minSuccess + maxFailure := replicationFactor - minSuccess // Skip those that have not heartbeated in a while. NB these are still // included in the calculation of minSuccess, so if too many failed ingesters // will cause the whole write to fail. - liveIngesters = make([]IngesterDesc, 0, len(ingesters)) - for _, ingester := range ingesters { - if r.IsHealthy(&ingester, op) { - liveIngesters = append(liveIngesters, ingester) + for i := 0; i < len(ingesters); { + if r.IsHealthy(&ingesters[i], op) { + i++ } else { + ingesters = append(ingesters[:i], ingesters[i+1:]...) maxFailure-- } } // This is just a shortcut - if there are not minSuccess available ingesters, // after filtering out dead ones, don't even bother trying. - if maxFailure < 0 || len(liveIngesters) < minSuccess { - err = fmt.Errorf("at least %d live ingesters required, could only find %d", - minSuccess, len(liveIngesters)) - return + if maxFailure < 0 || len(ingesters) < minSuccess { + err := fmt.Errorf("at least %d live ingesters required, could only find %d", + minSuccess, len(ingesters)) + return nil, 0, err } - return + return ingesters, maxFailure, nil } // IsHealthy checks whether an ingester appears to be alive and heartbeating @@ -60,3 +59,11 @@ func (r *Ring) IsHealthy(ingester *IngesterDesc, op Operation) bool { func (r *Ring) ReplicationFactor() int { return r.cfg.ReplicationFactor } + +// IngesterCount is number of ingesters in the ring +func (r *Ring) IngesterCount() int { + r.mtx.Lock() + c := len(r.ringDesc.Ingesters) + r.mtx.Unlock() + return c +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index a74b22ecfdca5..681f66203fda1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -31,10 +31,13 @@ const ( type ReadRing interface { prometheus.Collector - Get(key uint32, op Operation) (ReplicationSet, error) - BatchGet(keys []uint32, op Operation) ([]ReplicationSet, error) + // Get returns n (or more) ingesters which form the replicas for the given key. + // buf is a slice to be overwritten for the return value + // to avoid memory allocation; can be nil. + Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet, error) GetAll() (ReplicationSet, error) ReplicationFactor() int + IngesterCount() int } // Operation can be Read or Write @@ -181,37 +184,16 @@ func migrateRing(desc *Desc) []TokenDesc { } // Get returns n (or more) ingesters which form the replicas for the given key. -func (r *Ring) Get(key uint32, op Operation) (ReplicationSet, error) { +func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet, error) { r.mtx.RLock() defer r.mtx.RUnlock() - return r.getInternal(key, op) -} - -// BatchGet returns ReplicationFactor (or more) ingesters which form the replicas -// for the given keys. The order of the result matches the order of the input. 
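With BatchGet removed, callers resolve replicas key by key and pass a reusable buffer into Get, as DoBatch now does. A minimal sketch of that pattern (helper name and buffer size are illustrative, mirroring the comment in DoBatch):

package example

import "github.com/cortexproject/cortex/pkg/ring"

// lookupReplicas resolves the replica set for each key, re-using one small
// buffer across calls instead of allocating a fresh slice per key.
func lookupReplicas(r ring.ReadRing, keys []uint32) error {
    var descs [5]ring.IngesterDesc // replication factor 3, plus slack, as in DoBatch
    for _, key := range keys {
        rs, err := r.Get(key, ring.Write, descs[:0])
        if err != nil {
            return err
        }
        _ = rs.Ingesters // use the replica set here
    }
    return nil
}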
-func (r *Ring) BatchGet(keys []uint32, op Operation) ([]ReplicationSet, error) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - result := make([]ReplicationSet, len(keys), len(keys)) - for i, key := range keys { - rs, err := r.getInternal(key, op) - if err != nil { - return nil, err - } - result[i] = rs - } - return result, nil -} - -func (r *Ring) getInternal(key uint32, op Operation) (ReplicationSet, error) { if r.ringDesc == nil || len(r.ringDesc.Tokens) == 0 { return ReplicationSet{}, ErrEmptyRing } var ( n = r.cfg.ReplicationFactor - ingesters = make([]IngesterDesc, 0, n) + ingesters = buf[:0] distinctHosts = map[string]struct{}{} start = r.search(key) iterations = 0 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go index 1ecfd0793e904..d072e549322e0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go @@ -2,23 +2,26 @@ package ring import ( "math/rand" - "sort" "time" ) -// GenerateTokens make numTokens random tokens, none of which clash -// with takenTokens. Assumes takenTokens is sorted. +// GenerateTokens make numTokens unique random tokens, none of which clash +// with takenTokens. func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 { r := rand.New(rand.NewSource(time.Now().UnixNano())) + + used := make(map[uint32]bool) + for _, v := range takenTokens { + used[v] = true + } + tokens := []uint32{} for i := 0; i < numTokens; { candidate := r.Uint32() - j := sort.Search(len(takenTokens), func(i int) bool { - return takenTokens[i] >= candidate - }) - if j < len(takenTokens) && takenTokens[j] == candidate { + if used[candidate] { continue } + used[candidate] = true tokens = append(tokens, candidate) i++ } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 9e7bcfd6549b7..e486d405cfdc2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -226,7 +226,7 @@ func (o *Overrides) EnforceMetricName(userID string) bool { return o.overridesManager.GetLimits(userID).(*Limits).EnforceMetricName } -// CardinalityLimit whether to enforce the presence of a metric name. +// CardinalityLimit returns the maximum number of timeseries allowed in a query. func (o *Overrides) CardinalityLimit(userID string) int { return o.overridesManager.GetLimits(userID).(*Limits).CardinalityLimit } diff --git a/vendor/github.com/prometheus/tsdb/LICENSE b/vendor/github.com/gomodule/redigo/LICENSE similarity index 89% rename from vendor/github.com/prometheus/tsdb/LICENSE rename to vendor/github.com/gomodule/redigo/LICENSE index 261eeb9e9f8b2..67db8588217f2 100644 --- a/vendor/github.com/prometheus/tsdb/LICENSE +++ b/vendor/github.com/gomodule/redigo/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -172,30 +173,3 @@ defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. 
(Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/gomodule/redigo/internal/commandinfo.go b/vendor/github.com/gomodule/redigo/internal/commandinfo.go new file mode 100644 index 0000000000000..b763efbdd8dcc --- /dev/null +++ b/vendor/github.com/gomodule/redigo/internal/commandinfo.go @@ -0,0 +1,54 @@ +// Copyright 2014 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package internal // import "github.com/gomodule/redigo/internal" + +import ( + "strings" +) + +const ( + WatchState = 1 << iota + MultiState + SubscribeState + MonitorState +) + +type CommandInfo struct { + Set, Clear int +} + +var commandInfos = map[string]CommandInfo{ + "WATCH": {Set: WatchState}, + "UNWATCH": {Clear: WatchState}, + "MULTI": {Set: MultiState}, + "EXEC": {Clear: WatchState | MultiState}, + "DISCARD": {Clear: WatchState | MultiState}, + "PSUBSCRIBE": {Set: SubscribeState}, + "SUBSCRIBE": {Set: SubscribeState}, + "MONITOR": {Set: MonitorState}, +} + +func init() { + for n, ci := range commandInfos { + commandInfos[strings.ToLower(n)] = ci + } +} + +func LookupCommandInfo(commandName string) CommandInfo { + if ci, ok := commandInfos[commandName]; ok { + return ci + } + return commandInfos[strings.ToUpper(commandName)] +} diff --git a/vendor/github.com/gomodule/redigo/redis/conn.go b/vendor/github.com/gomodule/redigo/redis/conn.go new file mode 100644 index 0000000000000..5aa0f32f2e277 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/conn.go @@ -0,0 +1,673 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package redis + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/url" + "regexp" + "strconv" + "sync" + "time" +) + +var ( + _ ConnWithTimeout = (*conn)(nil) +) + +// conn is the low-level implementation of Conn +type conn struct { + // Shared + mu sync.Mutex + pending int + err error + conn net.Conn + + // Read + readTimeout time.Duration + br *bufio.Reader + + // Write + writeTimeout time.Duration + bw *bufio.Writer + + // Scratch space for formatting argument length. + // '*' or '$', length, "\r\n" + lenScratch [32]byte + + // Scratch space for formatting integers and floats. + numScratch [40]byte +} + +// DialTimeout acts like Dial but takes timeouts for establishing the +// connection to the server, writing a command and reading a reply. +// +// Deprecated: Use Dial with options instead. +func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { + return Dial(network, address, + DialConnectTimeout(connectTimeout), + DialReadTimeout(readTimeout), + DialWriteTimeout(writeTimeout)) +} + +// DialOption specifies an option for dialing a Redis server. +type DialOption struct { + f func(*dialOptions) +} + +type dialOptions struct { + readTimeout time.Duration + writeTimeout time.Duration + dialer *net.Dialer + dial func(network, addr string) (net.Conn, error) + db int + password string + useTLS bool + skipVerify bool + tlsConfig *tls.Config +} + +// DialReadTimeout specifies the timeout for reading a single command reply. +func DialReadTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.readTimeout = d + }} +} + +// DialWriteTimeout specifies the timeout for writing a single command. +func DialWriteTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.writeTimeout = d + }} +} + +// DialConnectTimeout specifies the timeout for connecting to the Redis server when +// no DialNetDial option is specified. +func DialConnectTimeout(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.Timeout = d + }} +} + +// DialKeepAlive specifies the keep-alive period for TCP connections to the Redis server +// when no DialNetDial option is specified. +// If zero, keep-alives are not enabled. If no DialKeepAlive option is specified then +// the default of 5 minutes is used to ensure that half-closed TCP sessions are detected. +func DialKeepAlive(d time.Duration) DialOption { + return DialOption{func(do *dialOptions) { + do.dialer.KeepAlive = d + }} +} + +// DialNetDial specifies a custom dial function for creating TCP +// connections, otherwise a net.Dialer customized via the other options is used. +// DialNetDial overrides DialConnectTimeout and DialKeepAlive. +func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { + return DialOption{func(do *dialOptions) { + do.dial = dial + }} +} + +// DialDatabase specifies the database to select when dialing a connection. +func DialDatabase(db int) DialOption { + return DialOption{func(do *dialOptions) { + do.db = db + }} +} + +// DialPassword specifies the password to use when connecting to +// the Redis server. +func DialPassword(password string) DialOption { + return DialOption{func(do *dialOptions) { + do.password = password + }} +} + +// DialTLSConfig specifies the config to use when a TLS connection is dialed. +// Has no effect when not dialing a TLS connection. 
+func DialTLSConfig(c *tls.Config) DialOption { + return DialOption{func(do *dialOptions) { + do.tlsConfig = c + }} +} + +// DialTLSSkipVerify disables server name verification when connecting over +// TLS. Has no effect when not dialing a TLS connection. +func DialTLSSkipVerify(skip bool) DialOption { + return DialOption{func(do *dialOptions) { + do.skipVerify = skip + }} +} + +// DialUseTLS specifies whether TLS should be used when connecting to the +// server. This option is ignore by DialURL. +func DialUseTLS(useTLS bool) DialOption { + return DialOption{func(do *dialOptions) { + do.useTLS = useTLS + }} +} + +// Dial connects to the Redis server at the given network and +// address using the specified options. +func Dial(network, address string, options ...DialOption) (Conn, error) { + do := dialOptions{ + dialer: &net.Dialer{ + KeepAlive: time.Minute * 5, + }, + } + for _, option := range options { + option.f(&do) + } + if do.dial == nil { + do.dial = do.dialer.Dial + } + + netConn, err := do.dial(network, address) + if err != nil { + return nil, err + } + + if do.useTLS { + var tlsConfig *tls.Config + if do.tlsConfig == nil { + tlsConfig = &tls.Config{InsecureSkipVerify: do.skipVerify} + } else { + tlsConfig = cloneTLSConfig(do.tlsConfig) + } + if tlsConfig.ServerName == "" { + host, _, err := net.SplitHostPort(address) + if err != nil { + netConn.Close() + return nil, err + } + tlsConfig.ServerName = host + } + + tlsConn := tls.Client(netConn, tlsConfig) + if err := tlsConn.Handshake(); err != nil { + netConn.Close() + return nil, err + } + netConn = tlsConn + } + + c := &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: do.readTimeout, + writeTimeout: do.writeTimeout, + } + + if do.password != "" { + if _, err := c.Do("AUTH", do.password); err != nil { + netConn.Close() + return nil, err + } + } + + if do.db != 0 { + if _, err := c.Do("SELECT", do.db); err != nil { + netConn.Close() + return nil, err + } + } + + return c, nil +} + +var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`) + +// DialURL connects to a Redis server at the given URL using the Redis +// URI scheme. URLs should follow the draft IANA specification for the +// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis). +func DialURL(rawurl string, options ...DialOption) (Conn, error) { + u, err := url.Parse(rawurl) + if err != nil { + return nil, err + } + + if u.Scheme != "redis" && u.Scheme != "rediss" { + return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme) + } + + // As per the IANA draft spec, the host defaults to localhost and + // the port defaults to 6379. + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // assume port is missing + host = u.Host + port = "6379" + } + if host == "" { + host = "localhost" + } + address := net.JoinHostPort(host, port) + + if u.User != nil { + password, isSet := u.User.Password() + if isSet { + options = append(options, DialPassword(password)) + } + } + + match := pathDBRegexp.FindStringSubmatch(u.Path) + if len(match) == 2 { + db := 0 + if len(match[1]) > 0 { + db, err = strconv.Atoi(match[1]) + if err != nil { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + } + if db != 0 { + options = append(options, DialDatabase(db)) + } + } else if u.Path != "" { + return nil, fmt.Errorf("invalid database: %s", u.Path[1:]) + } + + options = append(options, DialUseTLS(u.Scheme == "rediss")) + + return Dial("tcp", address, options...) 
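The new conn.go provides the dial path referenced above; a minimal sketch of opening a connection with options (address, timeouts and database number are placeholders):

package example

import (
    "time"

    "github.com/gomodule/redigo/redis"
)

func connect() (redis.Conn, error) {
    // Dial with explicit options; each option maps onto a dialOptions field.
    c, err := redis.Dial("tcp", "localhost:6379",
        redis.DialConnectTimeout(2*time.Second),
        redis.DialReadTimeout(time.Second),
        redis.DialDatabase(1),
    )
    if err != nil {
        return nil, err
    }
    // The same connection could also be described as a URL and opened with
    // DialURL, e.g. redis.DialURL("redis://localhost:6379/1").
    return c, nil
}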
+} + +// NewConn returns a new Redigo connection for the given net connection. +func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { + return &conn{ + conn: netConn, + bw: bufio.NewWriter(netConn), + br: bufio.NewReader(netConn), + readTimeout: readTimeout, + writeTimeout: writeTimeout, + } +} + +func (c *conn) Close() error { + c.mu.Lock() + err := c.err + if c.err == nil { + c.err = errors.New("redigo: closed") + err = c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) fatal(err error) error { + c.mu.Lock() + if c.err == nil { + c.err = err + // Close connection to force errors on subsequent calls and to unblock + // other reader or writer. + c.conn.Close() + } + c.mu.Unlock() + return err +} + +func (c *conn) Err() error { + c.mu.Lock() + err := c.err + c.mu.Unlock() + return err +} + +func (c *conn) writeLen(prefix byte, n int) error { + c.lenScratch[len(c.lenScratch)-1] = '\n' + c.lenScratch[len(c.lenScratch)-2] = '\r' + i := len(c.lenScratch) - 3 + for { + c.lenScratch[i] = byte('0' + n%10) + i -= 1 + n = n / 10 + if n == 0 { + break + } + } + c.lenScratch[i] = prefix + _, err := c.bw.Write(c.lenScratch[i:]) + return err +} + +func (c *conn) writeString(s string) error { + c.writeLen('$', len(s)) + c.bw.WriteString(s) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeBytes(p []byte) error { + c.writeLen('$', len(p)) + c.bw.Write(p) + _, err := c.bw.WriteString("\r\n") + return err +} + +func (c *conn) writeInt64(n int64) error { + return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) +} + +func (c *conn) writeFloat64(n float64) error { + return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) +} + +func (c *conn) writeCommand(cmd string, args []interface{}) error { + c.writeLen('*', 1+len(args)) + if err := c.writeString(cmd); err != nil { + return err + } + for _, arg := range args { + if err := c.writeArg(arg, true); err != nil { + return err + } + } + return nil +} + +func (c *conn) writeArg(arg interface{}, argumentTypeOK bool) (err error) { + switch arg := arg.(type) { + case string: + return c.writeString(arg) + case []byte: + return c.writeBytes(arg) + case int: + return c.writeInt64(int64(arg)) + case int64: + return c.writeInt64(arg) + case float64: + return c.writeFloat64(arg) + case bool: + if arg { + return c.writeString("1") + } else { + return c.writeString("0") + } + case nil: + return c.writeString("") + case Argument: + if argumentTypeOK { + return c.writeArg(arg.RedisArg(), false) + } + // See comment in default clause below. + var buf bytes.Buffer + fmt.Fprint(&buf, arg) + return c.writeBytes(buf.Bytes()) + default: + // This default clause is intended to handle builtin numeric types. + // The function should return an error for other types, but this is not + // done for compatibility with previous versions of the package. 
+ var buf bytes.Buffer + fmt.Fprint(&buf, arg) + return c.writeBytes(buf.Bytes()) + } +} + +type protocolError string + +func (pe protocolError) Error() string { + return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe)) +} + +func (c *conn) readLine() ([]byte, error) { + p, err := c.br.ReadSlice('\n') + if err == bufio.ErrBufferFull { + return nil, protocolError("long response line") + } + if err != nil { + return nil, err + } + i := len(p) - 2 + if i < 0 || p[i] != '\r' { + return nil, protocolError("bad response line terminator") + } + return p[:i], nil +} + +// parseLen parses bulk string and array lengths. +func parseLen(p []byte) (int, error) { + if len(p) == 0 { + return -1, protocolError("malformed length") + } + + if p[0] == '-' && len(p) == 2 && p[1] == '1' { + // handle $-1 and $-1 null replies. + return -1, nil + } + + var n int + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return -1, protocolError("illegal bytes in length") + } + n += int(b - '0') + } + + return n, nil +} + +// parseInt parses an integer reply. +func parseInt(p []byte) (interface{}, error) { + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + + var negate bool + if p[0] == '-' { + negate = true + p = p[1:] + if len(p) == 0 { + return 0, protocolError("malformed integer") + } + } + + var n int64 + for _, b := range p { + n *= 10 + if b < '0' || b > '9' { + return 0, protocolError("illegal bytes in length") + } + n += int64(b - '0') + } + + if negate { + n = -n + } + return n, nil +} + +var ( + okReply interface{} = "OK" + pongReply interface{} = "PONG" +) + +func (c *conn) readReply() (interface{}, error) { + line, err := c.readLine() + if err != nil { + return nil, err + } + if len(line) == 0 { + return nil, protocolError("short response line") + } + switch line[0] { + case '+': + switch { + case len(line) == 3 && line[1] == 'O' && line[2] == 'K': + // Avoid allocation for frequent "+OK" response. 
+ return okReply, nil + case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': + // Avoid allocation in PING command benchmarks :) + return pongReply, nil + default: + return string(line[1:]), nil + } + case '-': + return Error(string(line[1:])), nil + case ':': + return parseInt(line[1:]) + case '$': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + p := make([]byte, n) + _, err = io.ReadFull(c.br, p) + if err != nil { + return nil, err + } + if line, err := c.readLine(); err != nil { + return nil, err + } else if len(line) != 0 { + return nil, protocolError("bad bulk string format") + } + return p, nil + case '*': + n, err := parseLen(line[1:]) + if n < 0 || err != nil { + return nil, err + } + r := make([]interface{}, n) + for i := range r { + r[i], err = c.readReply() + if err != nil { + return nil, err + } + } + return r, nil + } + return nil, protocolError("unexpected response line") +} + +func (c *conn) Send(cmd string, args ...interface{}) error { + c.mu.Lock() + c.pending += 1 + c.mu.Unlock() + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.writeCommand(cmd, args); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Flush() error { + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + if err := c.bw.Flush(); err != nil { + return c.fatal(err) + } + return nil +} + +func (c *conn) Receive() (interface{}, error) { + return c.ReceiveWithTimeout(c.readTimeout) +} + +func (c *conn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + var deadline time.Time + if timeout != 0 { + deadline = time.Now().Add(timeout) + } + c.conn.SetReadDeadline(deadline) + + if reply, err = c.readReply(); err != nil { + return nil, c.fatal(err) + } + // When using pub/sub, the number of receives can be greater than the + // number of sends. To enable normal use of the connection after + // unsubscribing from all channels, we do not decrement pending to a + // negative value. + // + // The pending field is decremented after the reply is read to handle the + // case where Receive is called before Send. + c.mu.Lock() + if c.pending > 0 { + c.pending -= 1 + } + c.mu.Unlock() + if err, ok := reply.(Error); ok { + return nil, err + } + return +} + +func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) { + return c.DoWithTimeout(c.readTimeout, cmd, args...) 
+} + +func (c *conn) DoWithTimeout(readTimeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { + c.mu.Lock() + pending := c.pending + c.pending = 0 + c.mu.Unlock() + + if cmd == "" && pending == 0 { + return nil, nil + } + + if c.writeTimeout != 0 { + c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) + } + + if cmd != "" { + if err := c.writeCommand(cmd, args); err != nil { + return nil, c.fatal(err) + } + } + + if err := c.bw.Flush(); err != nil { + return nil, c.fatal(err) + } + + var deadline time.Time + if readTimeout != 0 { + deadline = time.Now().Add(readTimeout) + } + c.conn.SetReadDeadline(deadline) + + if cmd == "" { + reply := make([]interface{}, pending) + for i := range reply { + r, e := c.readReply() + if e != nil { + return nil, c.fatal(e) + } + reply[i] = r + } + return reply, nil + } + + var err error + var reply interface{} + for i := 0; i <= pending; i++ { + var e error + if reply, e = c.readReply(); e != nil { + return nil, c.fatal(e) + } + if e, ok := reply.(Error); ok && err == nil { + err = e + } + } + return reply, err +} diff --git a/vendor/github.com/gomodule/redigo/redis/doc.go b/vendor/github.com/gomodule/redigo/redis/doc.go new file mode 100644 index 0000000000000..70ec1ea69197a --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/doc.go @@ -0,0 +1,177 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package redis is a client for the Redis database. +// +// The Redigo FAQ (https://github.com/gomodule/redigo/wiki/FAQ) contains more +// documentation about this package. +// +// Connections +// +// The Conn interface is the primary interface for working with Redis. +// Applications create connections by calling the Dial, DialWithTimeout or +// NewConn functions. In the future, functions will be added for creating +// sharded and other types of connections. +// +// The application must call the connection Close method when the application +// is done with the connection. +// +// Executing Commands +// +// The Conn interface has a generic method for executing Redis commands: +// +// Do(commandName string, args ...interface{}) (reply interface{}, err error) +// +// The Redis command reference (http://redis.io/commands) lists the available +// commands. An example of using the Redis APPEND command is: +// +// n, err := conn.Do("APPEND", "key", "value") +// +// The Do method converts command arguments to bulk strings for transmission +// to the server as follows: +// +// Go Type Conversion +// []byte Sent as is +// string Sent as is +// int, int64 strconv.FormatInt(v) +// float64 strconv.FormatFloat(v, 'g', -1, 64) +// bool true -> "1", false -> "0" +// nil "" +// all other types fmt.Fprint(w, v) +// +// Redis command reply types are represented using the following Go types: +// +// Redis type Go type +// error redis.Error +// integer int64 +// simple string string +// bulk string []byte or nil if value not present. +// array []interface{} or nil if value not present. 
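// A minimal sketch of the argument and reply conversions tabulated above,
// assuming a Redis server reachable at localhost:6379; the key name "mykey"
// is arbitrary and the printed values depend on prior state of that key.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// The int argument 42 is sent as the bulk string "42"; APPEND returns a
	// Redis integer, which arrives as an int64 reply.
	reply, err := c.Do("APPEND", "mykey", 42)
	if err != nil {
		log.Fatal(err)
	}
	if n, ok := reply.(int64); ok {
		fmt.Println("length after APPEND:", n)
	}

	// GET returns a bulk string ([]byte); the String reply helper converts it.
	s, err := redis.String(c.Do("GET", "mykey"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("value:", s)
}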
+// +// Use type assertions or the reply helper functions to convert from +// interface{} to the specific Go type for the command result. +// +// Pipelining +// +// Connections support pipelining using the Send, Flush and Receive methods. +// +// Send(commandName string, args ...interface{}) error +// Flush() error +// Receive() (reply interface{}, err error) +// +// Send writes the command to the connection's output buffer. Flush flushes the +// connection's output buffer to the server. Receive reads a single reply from +// the server. The following example shows a simple pipeline. +// +// c.Send("SET", "foo", "bar") +// c.Send("GET", "foo") +// c.Flush() +// c.Receive() // reply from SET +// v, err = c.Receive() // reply from GET +// +// The Do method combines the functionality of the Send, Flush and Receive +// methods. The Do method starts by writing the command and flushing the output +// buffer. Next, the Do method receives all pending replies including the reply +// for the command just sent by Do. If any of the received replies is an error, +// then Do returns the error. If there are no errors, then Do returns the last +// reply. If the command argument to the Do method is "", then the Do method +// will flush the output buffer and receive pending replies without sending a +// command. +// +// Use the Send and Do methods to implement pipelined transactions. +// +// c.Send("MULTI") +// c.Send("INCR", "foo") +// c.Send("INCR", "bar") +// r, err := c.Do("EXEC") +// fmt.Println(r) // prints [1, 1] +// +// Concurrency +// +// Connections support one concurrent caller to the Receive method and one +// concurrent caller to the Send and Flush methods. No other concurrency is +// supported including concurrent calls to the Do method. +// +// For full concurrent access to Redis, use the thread-safe Pool to get, use +// and release a connection from within a goroutine. Connections returned from +// a Pool have the concurrency restrictions described in the previous +// paragraph. +// +// Publish and Subscribe +// +// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers. +// +// c.Send("SUBSCRIBE", "example") +// c.Flush() +// for { +// reply, err := c.Receive() +// if err != nil { +// return err +// } +// // process pushed message +// } +// +// The PubSubConn type wraps a Conn with convenience methods for implementing +// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods +// send and flush a subscription management command. The receive method +// converts a pushed message to convenient types for use in a type switch. +// +// psc := redis.PubSubConn{Conn: c} +// psc.Subscribe("example") +// for { +// switch v := psc.Receive().(type) { +// case redis.Message: +// fmt.Printf("%s: message: %s\n", v.Channel, v.Data) +// case redis.Subscription: +// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count) +// case error: +// return v +// } +// } +// +// Reply Helpers +// +// The Bool, Int, Bytes, String, Strings and Values functions convert a reply +// to a value of a specific type. To allow convenient wrapping of calls to the +// connection Do and Receive methods, the functions take a second argument of +// type error. If the error is non-nil, then the helper function returns the +// error. If the error is nil, the function converts the reply to the specified +// type: +// +// exists, err := redis.Bool(c.Do("EXISTS", "foo")) +// if err != nil { +// // handle error return from c.Do or type conversion error. 
+// } +// +// The Scan function converts elements of a array reply to Go types: +// +// var value1 int +// var value2 string +// reply, err := redis.Values(c.Do("MGET", "key1", "key2")) +// if err != nil { +// // handle error +// } +// if _, err := redis.Scan(reply, &value1, &value2); err != nil { +// // handle error +// } +// +// Errors +// +// Connection methods return error replies from the server as type redis.Error. +// +// Call the connection Err() method to determine if the connection encountered +// non-recoverable error such as a network error or protocol parsing error. If +// Err() returns a non-nil value, then the connection is not usable and should +// be closed. +package redis // import "github.com/gomodule/redigo/redis" diff --git a/vendor/github.com/gomodule/redigo/redis/go16.go b/vendor/github.com/gomodule/redigo/redis/go16.go new file mode 100644 index 0000000000000..f6b1a7ccdceef --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/go16.go @@ -0,0 +1,27 @@ +// +build !go1.7 + +package redis + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gomodule/redigo/redis/go17.go b/vendor/github.com/gomodule/redigo/redis/go17.go new file mode 100644 index 0000000000000..5f36379113c77 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/go17.go @@ -0,0 +1,29 @@ +// +build go1.7,!go1.8 + +package redis + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/github.com/gomodule/redigo/redis/go18.go b/vendor/github.com/gomodule/redigo/redis/go18.go new file mode 100644 index 0000000000000..558363be39aed --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/go18.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package redis + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + return cfg.Clone() +} diff --git a/vendor/github.com/gomodule/redigo/redis/log.go b/vendor/github.com/gomodule/redigo/redis/log.go new file mode 100644 index 0000000000000..b2996611c6d02 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/log.go @@ -0,0 +1,134 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use 
this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "fmt" + "log" + "time" +) + +var ( + _ ConnWithTimeout = (*loggingConn)(nil) +) + +// NewLoggingConn returns a logging wrapper around a connection. +func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { + if prefix != "" { + prefix = prefix + "." + } + return &loggingConn{conn, logger, prefix} +} + +type loggingConn struct { + Conn + logger *log.Logger + prefix string +} + +func (c *loggingConn) Close() error { + err := c.Conn.Close() + var buf bytes.Buffer + fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err) + c.logger.Output(2, buf.String()) + return err +} + +func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) { + const chop = 32 + switch v := v.(type) { + case []byte: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case string: + if len(v) > chop { + fmt.Fprintf(buf, "%q...", v[:chop]) + } else { + fmt.Fprintf(buf, "%q", v) + } + case []interface{}: + if len(v) == 0 { + buf.WriteString("[]") + } else { + sep := "[" + fin := "]" + if len(v) > chop { + v = v[:chop] + fin = "...]" + } + for _, vv := range v { + buf.WriteString(sep) + c.printValue(buf, vv) + sep = ", " + } + buf.WriteString(fin) + } + default: + fmt.Fprint(buf, v) + } +} + +func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s%s(", c.prefix, method) + if method != "Receive" { + buf.WriteString(commandName) + for _, arg := range args { + buf.WriteString(", ") + c.printValue(&buf, arg) + } + } + buf.WriteString(") -> (") + if method != "Send" { + c.printValue(&buf, reply) + buf.WriteString(", ") + } + fmt.Fprintf(&buf, "%v)", err) + c.logger.Output(3, buf.String()) +} + +func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) { + reply, err := c.Conn.Do(commandName, args...) + c.print("Do", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (interface{}, error) { + reply, err := DoWithTimeout(c.Conn, timeout, commandName, args...) + c.print("DoWithTimeout", commandName, args, reply, err) + return reply, err +} + +func (c *loggingConn) Send(commandName string, args ...interface{}) error { + err := c.Conn.Send(commandName, args...) 
+ c.print("Send", commandName, args, nil, err) + return err +} + +func (c *loggingConn) Receive() (interface{}, error) { + reply, err := c.Conn.Receive() + c.print("Receive", "", nil, reply, err) + return reply, err +} + +func (c *loggingConn) ReceiveWithTimeout(timeout time.Duration) (interface{}, error) { + reply, err := ReceiveWithTimeout(c.Conn, timeout) + c.print("ReceiveWithTimeout", "", nil, reply, err) + return reply, err +} diff --git a/vendor/github.com/gomodule/redigo/redis/pool.go b/vendor/github.com/gomodule/redigo/redis/pool.go new file mode 100644 index 0000000000000..d77da3254a3cb --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/pool.go @@ -0,0 +1,562 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "bytes" + "crypto/rand" + "crypto/sha1" + "errors" + "io" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/gomodule/redigo/internal" +) + +var ( + _ ConnWithTimeout = (*activeConn)(nil) + _ ConnWithTimeout = (*errorConn)(nil) +) + +var nowFunc = time.Now // for testing + +// ErrPoolExhausted is returned from a pool connection method (Do, Send, +// Receive, Flush, Err) when the maximum number of database connections in the +// pool has been reached. +var ErrPoolExhausted = errors.New("redigo: connection pool exhausted") + +var ( + errPoolClosed = errors.New("redigo: connection pool closed") + errConnClosed = errors.New("redigo: connection closed") +) + +// Pool maintains a pool of connections. The application calls the Get method +// to get a connection from the pool and the connection's Close method to +// return the connection's resources to the pool. +// +// The following example shows how to use a pool in a web application. The +// application creates a pool at application startup and makes it available to +// request handlers using a package level variable. The pool configuration used +// here is an example, not a recommendation. +// +// func newPool(addr string) *redis.Pool { +// return &redis.Pool{ +// MaxIdle: 3, +// IdleTimeout: 240 * time.Second, +// Dial: func () (redis.Conn, error) { return redis.Dial("tcp", addr) }, +// } +// } +// +// var ( +// pool *redis.Pool +// redisServer = flag.String("redisServer", ":6379", "") +// ) +// +// func main() { +// flag.Parse() +// pool = newPool(*redisServer) +// ... +// } +// +// A request handler gets a connection from the pool and closes the connection +// when the handler is done: +// +// func serveHome(w http.ResponseWriter, r *http.Request) { +// conn := pool.Get() +// defer conn.Close() +// ... +// } +// +// Use the Dial function to authenticate connections with the AUTH command or +// select a database with the SELECT command: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. 
+// Dial: func () (redis.Conn, error) { +// c, err := redis.Dial("tcp", server) +// if err != nil { +// return nil, err +// } +// if _, err := c.Do("AUTH", password); err != nil { +// c.Close() +// return nil, err +// } +// if _, err := c.Do("SELECT", db); err != nil { +// c.Close() +// return nil, err +// } +// return c, nil +// }, +// } +// +// Use the TestOnBorrow function to check the health of an idle connection +// before the connection is returned to the application. This example PINGs +// connections that have been idle more than a minute: +// +// pool := &redis.Pool{ +// // Other pool configuration not shown in this example. +// TestOnBorrow: func(c redis.Conn, t time.Time) error { +// if time.Since(t) < time.Minute { +// return nil +// } +// _, err := c.Do("PING") +// return err +// }, +// } +// +type Pool struct { + // Dial is an application supplied function for creating and configuring a + // connection. + // + // The connection returned from Dial must not be in a special state + // (subscribed to pubsub channel, transaction started, ...). + Dial func() (Conn, error) + + // TestOnBorrow is an optional application supplied function for checking + // the health of an idle connection before the connection is used again by + // the application. Argument t is the time that the connection was returned + // to the pool. If the function returns an error, then the connection is + // closed. + TestOnBorrow func(c Conn, t time.Time) error + + // Maximum number of idle connections in the pool. + MaxIdle int + + // Maximum number of connections allocated by the pool at a given time. + // When zero, there is no limit on the number of connections in the pool. + MaxActive int + + // Close connections after remaining idle for this duration. If the value + // is zero, then idle connections are not closed. Applications should set + // the timeout to a value less than the server's timeout. + IdleTimeout time.Duration + + // If Wait is true and the pool is at the MaxActive limit, then Get() waits + // for a connection to be returned to the pool before returning. + Wait bool + + // Close connections older than this duration. If the value is zero, then + // the pool does not close connections based on age. + MaxConnLifetime time.Duration + + chInitialized uint32 // set to 1 when field ch is initialized + + mu sync.Mutex // mu protects the following fields + closed bool // set to true when the pool is closed. + active int // the number of open connections in the pool + ch chan struct{} // limits open connections when p.Wait is true + idle idleList // idle connections +} + +// NewPool creates a new pool. +// +// Deprecated: Initialize the Pool directory as shown in the example. +func NewPool(newFn func() (Conn, error), maxIdle int) *Pool { + return &Pool{Dial: newFn, MaxIdle: maxIdle} +} + +// Get gets a connection. The application must close the returned connection. +// This method always returns a valid connection so that applications can defer +// error handling to the first use of the connection. If there is an error +// getting an underlying connection, then the connection Err, Do, Send, Flush +// and Receive methods return that error. +func (p *Pool) Get() Conn { + pc, err := p.get(nil) + if err != nil { + return errorConn{err} + } + return &activeConn{p: p, pc: pc} +} + +// PoolStats contains pool statistics. +type PoolStats struct { + // ActiveCount is the number of connections in the pool. The count includes + // idle connections and connections in use. 
+ ActiveCount int + // IdleCount is the number of idle connections in the pool. + IdleCount int +} + +// Stats returns pool's statistics. +func (p *Pool) Stats() PoolStats { + p.mu.Lock() + stats := PoolStats{ + ActiveCount: p.active, + IdleCount: p.idle.count, + } + p.mu.Unlock() + + return stats +} + +// ActiveCount returns the number of connections in the pool. The count +// includes idle connections and connections in use. +func (p *Pool) ActiveCount() int { + p.mu.Lock() + active := p.active + p.mu.Unlock() + return active +} + +// IdleCount returns the number of idle connections in the pool. +func (p *Pool) IdleCount() int { + p.mu.Lock() + idle := p.idle.count + p.mu.Unlock() + return idle +} + +// Close releases the resources used by the pool. +func (p *Pool) Close() error { + p.mu.Lock() + if p.closed { + p.mu.Unlock() + return nil + } + p.closed = true + p.active -= p.idle.count + pc := p.idle.front + p.idle.count = 0 + p.idle.front, p.idle.back = nil, nil + if p.ch != nil { + close(p.ch) + } + p.mu.Unlock() + for ; pc != nil; pc = pc.next { + pc.c.Close() + } + return nil +} + +func (p *Pool) lazyInit() { + // Fast path. + if atomic.LoadUint32(&p.chInitialized) == 1 { + return + } + // Slow path. + p.mu.Lock() + if p.chInitialized == 0 { + p.ch = make(chan struct{}, p.MaxActive) + if p.closed { + close(p.ch) + } else { + for i := 0; i < p.MaxActive; i++ { + p.ch <- struct{}{} + } + } + atomic.StoreUint32(&p.chInitialized, 1) + } + p.mu.Unlock() +} + +// get prunes stale connections and returns a connection from the idle list or +// creates a new connection. +func (p *Pool) get(ctx interface { + Done() <-chan struct{} + Err() error +}) (*poolConn, error) { + + // Handle limit for p.Wait == true. + if p.Wait && p.MaxActive > 0 { + p.lazyInit() + if ctx == nil { + <-p.ch + } else { + select { + case <-p.ch: + case <-ctx.Done(): + return nil, ctx.Err() + } + } + } + + p.mu.Lock() + + // Prune stale connections at the back of the idle list. + if p.IdleTimeout > 0 { + n := p.idle.count + for i := 0; i < n && p.idle.back != nil && p.idle.back.t.Add(p.IdleTimeout).Before(nowFunc()); i++ { + pc := p.idle.back + p.idle.popBack() + p.mu.Unlock() + pc.c.Close() + p.mu.Lock() + p.active-- + } + } + + // Get idle connection from the front of idle list. + for p.idle.front != nil { + pc := p.idle.front + p.idle.popFront() + p.mu.Unlock() + if (p.TestOnBorrow == nil || p.TestOnBorrow(pc.c, pc.t) == nil) && + (p.MaxConnLifetime == 0 || nowFunc().Sub(pc.created) < p.MaxConnLifetime) { + return pc, nil + } + pc.c.Close() + p.mu.Lock() + p.active-- + } + + // Check for pool closed before dialing a new connection. + if p.closed { + p.mu.Unlock() + return nil, errors.New("redigo: get on closed pool") + } + + // Handle limit for p.Wait == false. 
+ if !p.Wait && p.MaxActive > 0 && p.active >= p.MaxActive { + p.mu.Unlock() + return nil, ErrPoolExhausted + } + + p.active++ + p.mu.Unlock() + c, err := p.Dial() + if err != nil { + c = nil + p.mu.Lock() + p.active-- + if p.ch != nil && !p.closed { + p.ch <- struct{}{} + } + p.mu.Unlock() + } + return &poolConn{c: c, created: nowFunc()}, err +} + +func (p *Pool) put(pc *poolConn, forceClose bool) error { + p.mu.Lock() + if !p.closed && !forceClose { + pc.t = nowFunc() + p.idle.pushFront(pc) + if p.idle.count > p.MaxIdle { + pc = p.idle.back + p.idle.popBack() + } else { + pc = nil + } + } + + if pc != nil { + p.mu.Unlock() + pc.c.Close() + p.mu.Lock() + p.active-- + } + + if p.ch != nil && !p.closed { + p.ch <- struct{}{} + } + p.mu.Unlock() + return nil +} + +type activeConn struct { + p *Pool + pc *poolConn + state int +} + +var ( + sentinel []byte + sentinelOnce sync.Once +) + +func initSentinel() { + p := make([]byte, 64) + if _, err := rand.Read(p); err == nil { + sentinel = p + } else { + h := sha1.New() + io.WriteString(h, "Oops, rand failed. Use time instead.") + io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10)) + sentinel = h.Sum(nil) + } +} + +func (ac *activeConn) Close() error { + pc := ac.pc + if pc == nil { + return nil + } + ac.pc = nil + + if ac.state&internal.MultiState != 0 { + pc.c.Send("DISCARD") + ac.state &^= (internal.MultiState | internal.WatchState) + } else if ac.state&internal.WatchState != 0 { + pc.c.Send("UNWATCH") + ac.state &^= internal.WatchState + } + if ac.state&internal.SubscribeState != 0 { + pc.c.Send("UNSUBSCRIBE") + pc.c.Send("PUNSUBSCRIBE") + // To detect the end of the message stream, ask the server to echo + // a sentinel value and read until we see that value. + sentinelOnce.Do(initSentinel) + pc.c.Send("ECHO", sentinel) + pc.c.Flush() + for { + p, err := pc.c.Receive() + if err != nil { + break + } + if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) { + ac.state &^= internal.SubscribeState + break + } + } + } + pc.c.Do("") + ac.p.put(pc, ac.state != 0 || pc.c.Err() != nil) + return nil +} + +func (ac *activeConn) Err() error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + return pc.c.Err() +} + +func (ac *activeConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + ci := internal.LookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return pc.c.Do(commandName, args...) +} + +func (ac *activeConn) DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + ci := internal.LookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return cwt.DoWithTimeout(timeout, commandName, args...) +} + +func (ac *activeConn) Send(commandName string, args ...interface{}) error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + ci := internal.LookupCommandInfo(commandName) + ac.state = (ac.state | ci.Set) &^ ci.Clear + return pc.c.Send(commandName, args...) 
+} + +func (ac *activeConn) Flush() error { + pc := ac.pc + if pc == nil { + return errConnClosed + } + return pc.c.Flush() +} + +func (ac *activeConn) Receive() (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + return pc.c.Receive() +} + +func (ac *activeConn) ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) { + pc := ac.pc + if pc == nil { + return nil, errConnClosed + } + cwt, ok := pc.c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.ReceiveWithTimeout(timeout) +} + +type errorConn struct{ err error } + +func (ec errorConn) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err } +func (ec errorConn) DoWithTimeout(time.Duration, string, ...interface{}) (interface{}, error) { + return nil, ec.err +} +func (ec errorConn) Send(string, ...interface{}) error { return ec.err } +func (ec errorConn) Err() error { return ec.err } +func (ec errorConn) Close() error { return nil } +func (ec errorConn) Flush() error { return ec.err } +func (ec errorConn) Receive() (interface{}, error) { return nil, ec.err } +func (ec errorConn) ReceiveWithTimeout(time.Duration) (interface{}, error) { return nil, ec.err } + +type idleList struct { + count int + front, back *poolConn +} + +type poolConn struct { + c Conn + t time.Time + created time.Time + next, prev *poolConn +} + +func (l *idleList) pushFront(pc *poolConn) { + pc.next = l.front + pc.prev = nil + if l.count == 0 { + l.back = pc + } else { + l.front.prev = pc + } + l.front = pc + l.count++ + return +} + +func (l *idleList) popFront() { + pc := l.front + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + pc.next.prev = nil + l.front = pc.next + } + pc.next, pc.prev = nil, nil +} + +func (l *idleList) popBack() { + pc := l.back + l.count-- + if l.count == 0 { + l.front, l.back = nil, nil + } else { + pc.prev.next = nil + l.back = pc.prev + } + pc.next, pc.prev = nil, nil +} diff --git a/vendor/github.com/gomodule/redigo/redis/pool17.go b/vendor/github.com/gomodule/redigo/redis/pool17.go new file mode 100644 index 0000000000000..c1ea18ee37de2 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/pool17.go @@ -0,0 +1,35 @@ +// Copyright 2018 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// +build go1.7 + +package redis + +import "context" + +// GetContext gets a connection using the provided context. +// +// The provided Context must be non-nil. If the context expires before the +// connection is complete, an error is returned. Any expiration on the context +// will not affect the returned connection. +// +// If the function completes without error, then the application must close the +// returned connection. 
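// A minimal sketch of how GetContext might be used so that a caller stops
// waiting for a pooled connection once its context expires. The pool settings
// and the localhost address are illustrative assumptions, not recommendations;
// Wait and MaxActive are set because the context is only consulted while
// waiting for a free connection.
package main

import (
	"context"
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redis.Pool{
		MaxIdle:   3,
		MaxActive: 10,
		Wait:      true,
		Dial:      func() (redis.Conn, error) { return redis.Dial("tcp", "localhost:6379") },
	}
	defer pool.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	conn, err := pool.GetContext(ctx)
	if err != nil {
		log.Fatal(err) // e.g. the context expired while waiting for a connection
	}
	defer conn.Close()

	if _, err := conn.Do("PING"); err != nil {
		log.Fatal(err)
	}
}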
+func (p *Pool) GetContext(ctx context.Context) (Conn, error) { + pc, err := p.get(ctx) + if err != nil { + return errorConn{err}, err + } + return &activeConn{p: p, pc: pc}, nil +} diff --git a/vendor/github.com/gomodule/redigo/redis/pubsub.go b/vendor/github.com/gomodule/redigo/redis/pubsub.go new file mode 100644 index 0000000000000..2da60211d9f8b --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/pubsub.go @@ -0,0 +1,148 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "time" +) + +// Subscription represents a subscribe or unsubscribe notification. +type Subscription struct { + // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe" + Kind string + + // The channel that was changed. + Channel string + + // The current number of subscriptions for connection. + Count int +} + +// Message represents a message notification. +type Message struct { + // The originating channel. + Channel string + + // The matched pattern, if any + Pattern string + + // The message data. + Data []byte +} + +// Pong represents a pubsub pong notification. +type Pong struct { + Data string +} + +// PubSubConn wraps a Conn with convenience methods for subscribers. +type PubSubConn struct { + Conn Conn +} + +// Close closes the connection. +func (c PubSubConn) Close() error { + return c.Conn.Close() +} + +// Subscribe subscribes the connection to the specified channels. +func (c PubSubConn) Subscribe(channel ...interface{}) error { + c.Conn.Send("SUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PSubscribe subscribes the connection to the given patterns. +func (c PubSubConn) PSubscribe(channel ...interface{}) error { + c.Conn.Send("PSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Unsubscribe unsubscribes the connection from the given channels, or from all +// of them if none is given. +func (c PubSubConn) Unsubscribe(channel ...interface{}) error { + c.Conn.Send("UNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// PUnsubscribe unsubscribes the connection from the given patterns, or from all +// of them if none is given. +func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { + c.Conn.Send("PUNSUBSCRIBE", channel...) + return c.Conn.Flush() +} + +// Ping sends a PING to the server with the specified data. +// +// The connection must be subscribed to at least one channel or pattern when +// calling this method. +func (c PubSubConn) Ping(data string) error { + c.Conn.Send("PING", data) + return c.Conn.Flush() +} + +// Receive returns a pushed message as a Subscription, Message, Pong or error. +// The return value is intended to be used directly in a type switch as +// illustrated in the PubSubConn example. +func (c PubSubConn) Receive() interface{} { + return c.receiveInternal(c.Conn.Receive()) +} + +// ReceiveWithTimeout is like Receive, but it allows the application to +// override the connection's default timeout. 
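// A minimal sketch of a subscriber loop built on PubSubConn and
// ReceiveWithTimeout as described above. The address, channel name and the
// 30-second timeout are assumptions for illustration; the loop ends once the
// subscription count drops to zero (for example after an Unsubscribe issued
// elsewhere) or on error.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	psc := redis.PubSubConn{Conn: c}
	if err := psc.Subscribe("example"); err != nil {
		log.Fatal(err)
	}

	for {
		switch v := psc.ReceiveWithTimeout(30 * time.Second).(type) {
		case redis.Message:
			fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
		case redis.Subscription:
			fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
			if v.Count == 0 {
				return // no subscriptions left
			}
		case error:
			log.Fatal(v)
		}
	}
}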
+func (c PubSubConn) ReceiveWithTimeout(timeout time.Duration) interface{} { + return c.receiveInternal(ReceiveWithTimeout(c.Conn, timeout)) +} + +func (c PubSubConn) receiveInternal(replyArg interface{}, errArg error) interface{} { + reply, err := Values(replyArg, errArg) + if err != nil { + return err + } + + var kind string + reply, err = Scan(reply, &kind) + if err != nil { + return err + } + + switch kind { + case "message": + var m Message + if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "pmessage": + var m Message + if _, err := Scan(reply, &m.Pattern, &m.Channel, &m.Data); err != nil { + return err + } + return m + case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": + s := Subscription{Kind: kind} + if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { + return err + } + return s + case "pong": + var p Pong + if _, err := Scan(reply, &p.Data); err != nil { + return err + } + return p + } + return errors.New("redigo: unknown pubsub notification") +} diff --git a/vendor/github.com/gomodule/redigo/redis/redis.go b/vendor/github.com/gomodule/redigo/redis/redis.go new file mode 100644 index 0000000000000..141fa4a91211e --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/redis.go @@ -0,0 +1,117 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "time" +) + +// Error represents an error returned in a command reply. +type Error string + +func (err Error) Error() string { return string(err) } + +// Conn represents a connection to a Redis server. +type Conn interface { + // Close closes the connection. + Close() error + + // Err returns a non-nil value when the connection is not usable. + Err() error + + // Do sends a command to the server and returns the received reply. + Do(commandName string, args ...interface{}) (reply interface{}, err error) + + // Send writes the command to the client's output buffer. + Send(commandName string, args ...interface{}) error + + // Flush flushes the output buffer to the Redis server. + Flush() error + + // Receive receives a single reply from the Redis server + Receive() (reply interface{}, err error) +} + +// Argument is the interface implemented by an object which wants to control how +// the object is converted to Redis bulk strings. +type Argument interface { + // RedisArg returns a value to be encoded as a bulk string per the + // conversions listed in the section 'Executing Commands'. + // Implementations should typically return a []byte or string. + RedisArg() interface{} +} + +// Scanner is implemented by an object which wants to control its value is +// interpreted when read from Redis. +type Scanner interface { + // RedisScan assigns a value from a Redis value. The argument src is one of + // the reply types listed in the section `Executing Commands`. + // + // An error should be returned if the value cannot be stored without + // loss of information. 
+ RedisScan(src interface{}) error +} + +// ConnWithTimeout is an optional interface that allows the caller to override +// a connection's default read timeout. This interface is useful for executing +// the BLPOP, BRPOP, BRPOPLPUSH, XREAD and other commands that block at the +// server. +// +// A connection's default read timeout is set with the DialReadTimeout dial +// option. Applications should rely on the default timeout for commands that do +// not block at the server. +// +// All of the Conn implementations in this package satisfy the ConnWithTimeout +// interface. +// +// Use the DoWithTimeout and ReceiveWithTimeout helper functions to simplify +// use of this interface. +type ConnWithTimeout interface { + Conn + + // Do sends a command to the server and returns the received reply. + // The timeout overrides the read timeout set when dialing the + // connection. + DoWithTimeout(timeout time.Duration, commandName string, args ...interface{}) (reply interface{}, err error) + + // Receive receives a single reply from the Redis server. The timeout + // overrides the read timeout set when dialing the connection. + ReceiveWithTimeout(timeout time.Duration) (reply interface{}, err error) +} + +var errTimeoutNotSupported = errors.New("redis: connection does not support ConnWithTimeout") + +// DoWithTimeout executes a Redis command with the specified read timeout. If +// the connection does not satisfy the ConnWithTimeout interface, then an error +// is returned. +func DoWithTimeout(c Conn, timeout time.Duration, cmd string, args ...interface{}) (interface{}, error) { + cwt, ok := c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.DoWithTimeout(timeout, cmd, args...) +} + +// ReceiveWithTimeout receives a reply with the specified read timeout. If the +// connection does not satisfy the ConnWithTimeout interface, then an error is +// returned. +func ReceiveWithTimeout(c Conn, timeout time.Duration) (interface{}, error) { + cwt, ok := c.(ConnWithTimeout) + if !ok { + return nil, errTimeoutNotSupported + } + return cwt.ReceiveWithTimeout(timeout) +} diff --git a/vendor/github.com/gomodule/redigo/redis/reply.go b/vendor/github.com/gomodule/redigo/redis/reply.go new file mode 100644 index 0000000000000..c2b3b2b6e7484 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/reply.go @@ -0,0 +1,479 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "strconv" +) + +// ErrNil indicates that a reply value is nil. +var ErrNil = errors.New("redigo: nil returned") + +// Int is a helper that converts a command reply to an integer. If err is not +// equal to nil, then Int returns 0, err. 
Otherwise, Int converts the +// reply to an int as follows: +// +// Reply type Result +// integer int(reply), nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int(reply interface{}, err error) (int, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + x := int(reply) + if int64(x) != reply { + return 0, strconv.ErrRange + } + return x, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 0) + return int(n), err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) +} + +// Int64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Int64(reply interface{}, err error) (int64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + return reply, nil + case []byte: + n, err := strconv.ParseInt(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) +} + +var errNegativeInt = errors.New("redigo: unexpected value for Uint64") + +// Uint64 is a helper that converts a command reply to 64 bit integer. If err is +// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the +// reply to an int64 as follows: +// +// Reply type Result +// integer reply, nil +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Uint64(reply interface{}, err error) (uint64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case int64: + if reply < 0 { + return 0, errNegativeInt + } + return uint64(reply), nil + case []byte: + n, err := strconv.ParseUint(string(reply), 10, 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) +} + +// Float64 is a helper that converts a command reply to 64 bit float. If err is +// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts +// the reply to an int as follows: +// +// Reply type Result +// bulk string parsed reply, nil +// nil 0, ErrNil +// other 0, error +func Float64(reply interface{}, err error) (float64, error) { + if err != nil { + return 0, err + } + switch reply := reply.(type) { + case []byte: + n, err := strconv.ParseFloat(string(reply), 64) + return n, err + case nil: + return 0, ErrNil + case Error: + return 0, reply + } + return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) +} + +// String is a helper that converts a command reply to a string. If err is not +// equal to nil, then String returns "", err. 
Otherwise String converts the +// reply to a string as follows: +// +// Reply type Result +// bulk string string(reply), nil +// simple string reply, nil +// nil "", ErrNil +// other "", error +func String(reply interface{}, err error) (string, error) { + if err != nil { + return "", err + } + switch reply := reply.(type) { + case []byte: + return string(reply), nil + case string: + return reply, nil + case nil: + return "", ErrNil + case Error: + return "", reply + } + return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) +} + +// Bytes is a helper that converts a command reply to a slice of bytes. If err +// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts +// the reply to a slice of bytes as follows: +// +// Reply type Result +// bulk string reply, nil +// simple string []byte(reply), nil +// nil nil, ErrNil +// other nil, error +func Bytes(reply interface{}, err error) ([]byte, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []byte: + return reply, nil + case string: + return []byte(reply), nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) +} + +// Bool is a helper that converts a command reply to a boolean. If err is not +// equal to nil, then Bool returns false, err. Otherwise Bool converts the +// reply to boolean as follows: +// +// Reply type Result +// integer value != 0, nil +// bulk string strconv.ParseBool(reply) +// nil false, ErrNil +// other false, error +func Bool(reply interface{}, err error) (bool, error) { + if err != nil { + return false, err + } + switch reply := reply.(type) { + case int64: + return reply != 0, nil + case []byte: + return strconv.ParseBool(string(reply)) + case nil: + return false, ErrNil + case Error: + return false, reply + } + return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) +} + +// MultiBulk is a helper that converts an array command reply to a []interface{}. +// +// Deprecated: Use Values instead. +func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) } + +// Values is a helper that converts an array command reply to a []interface{}. +// If err is not equal to nil, then Values returns nil, err. Otherwise, Values +// converts the reply as follows: +// +// Reply type Result +// array reply, nil +// nil nil, ErrNil +// other nil, error +func Values(reply interface{}, err error) ([]interface{}, error) { + if err != nil { + return nil, err + } + switch reply := reply.(type) { + case []interface{}: + return reply, nil + case nil: + return nil, ErrNil + case Error: + return nil, reply + } + return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) +} + +func sliceHelper(reply interface{}, err error, name string, makeSlice func(int), assign func(int, interface{}) error) error { + if err != nil { + return err + } + switch reply := reply.(type) { + case []interface{}: + makeSlice(len(reply)) + for i := range reply { + if reply[i] == nil { + continue + } + if err := assign(i, reply[i]); err != nil { + return err + } + } + return nil + case nil: + return ErrNil + case Error: + return reply + } + return fmt.Errorf("redigo: unexpected type for %s, got type %T", name, reply) +} + +// Float64s is a helper that converts an array command reply to a []float64. If +// err is not equal to nil, then Float64s returns nil, err. 
Nil array items are +// converted to 0 in the output slice. Floats64 returns an error if an array +// item is not a bulk string or nil. +func Float64s(reply interface{}, err error) ([]float64, error) { + var result []float64 + err = sliceHelper(reply, err, "Float64s", func(n int) { result = make([]float64, n) }, func(i int, v interface{}) error { + p, ok := v.([]byte) + if !ok { + return fmt.Errorf("redigo: unexpected element type for Floats64, got type %T", v) + } + f, err := strconv.ParseFloat(string(p), 64) + result[i] = f + return err + }) + return result, err +} + +// Strings is a helper that converts an array command reply to a []string. If +// err is not equal to nil, then Strings returns nil, err. Nil array items are +// converted to "" in the output slice. Strings returns an error if an array +// item is not a bulk string or nil. +func Strings(reply interface{}, err error) ([]string, error) { + var result []string + err = sliceHelper(reply, err, "Strings", func(n int) { result = make([]string, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case string: + result[i] = v + return nil + case []byte: + result[i] = string(v) + return nil + default: + return fmt.Errorf("redigo: unexpected element type for Strings, got type %T", v) + } + }) + return result, err +} + +// ByteSlices is a helper that converts an array command reply to a [][]byte. +// If err is not equal to nil, then ByteSlices returns nil, err. Nil array +// items are stay nil. ByteSlices returns an error if an array item is not a +// bulk string or nil. +func ByteSlices(reply interface{}, err error) ([][]byte, error) { + var result [][]byte + err = sliceHelper(reply, err, "ByteSlices", func(n int) { result = make([][]byte, n) }, func(i int, v interface{}) error { + p, ok := v.([]byte) + if !ok { + return fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", v) + } + result[i] = p + return nil + }) + return result, err +} + +// Int64s is a helper that converts an array command reply to a []int64. +// If err is not equal to nil, then Int64s returns nil, err. Nil array +// items are stay nil. Int64s returns an error if an array item is not a +// bulk string or nil. +func Int64s(reply interface{}, err error) ([]int64, error) { + var result []int64 + err = sliceHelper(reply, err, "Int64s", func(n int) { result = make([]int64, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case int64: + result[i] = v + return nil + case []byte: + n, err := strconv.ParseInt(string(v), 10, 64) + result[i] = n + return err + default: + return fmt.Errorf("redigo: unexpected element type for Int64s, got type %T", v) + } + }) + return result, err +} + +// Ints is a helper that converts an array command reply to a []in. +// If err is not equal to nil, then Ints returns nil, err. Nil array +// items are stay nil. Ints returns an error if an array item is not a +// bulk string or nil. 
+func Ints(reply interface{}, err error) ([]int, error) { + var result []int + err = sliceHelper(reply, err, "Ints", func(n int) { result = make([]int, n) }, func(i int, v interface{}) error { + switch v := v.(type) { + case int64: + n := int(v) + if int64(n) != v { + return strconv.ErrRange + } + result[i] = n + return nil + case []byte: + n, err := strconv.Atoi(string(v)) + result[i] = n + return err + default: + return fmt.Errorf("redigo: unexpected element type for Ints, got type %T", v) + } + }) + return result, err +} + +// StringMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format. +// Requires an even number of values in result. +func StringMap(result interface{}, err error) (map[string]string, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: StringMap expects even number of values result") + } + m := make(map[string]string, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, okKey := values[i].([]byte) + value, okValue := values[i+1].([]byte) + if !okKey || !okValue { + return nil, errors.New("redigo: StringMap key not a bulk string value") + } + m[string(key)] = string(value) + } + return m, nil +} + +// IntMap is a helper that converts an array of strings (alternating key, value) +// into a map[string]int. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func IntMap(result interface{}, err error) (map[string]int, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: IntMap expects even number of values result") + } + m := make(map[string]int, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: IntMap key not a bulk string value") + } + value, err := Int(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Int64Map is a helper that converts an array of strings (alternating key, value) +// into a map[string]int64. The HGETALL commands return replies in this format. +// Requires an even number of values in result. +func Int64Map(result interface{}, err error) (map[string]int64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + if len(values)%2 != 0 { + return nil, errors.New("redigo: Int64Map expects even number of values result") + } + m := make(map[string]int64, len(values)/2) + for i := 0; i < len(values); i += 2 { + key, ok := values[i].([]byte) + if !ok { + return nil, errors.New("redigo: Int64Map key not a bulk string value") + } + value, err := Int64(values[i+1], nil) + if err != nil { + return nil, err + } + m[string(key)] = value + } + return m, nil +} + +// Positions is a helper that converts an array of positions (lat, long) +// into a [][2]float64. The GEOPOS command returns replies in this format. 
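// A minimal sketch of the StringMap and IntMap helpers defined just above,
// which fold an alternating field/value reply (as produced by HGETALL) into a
// map. The address, key and field names are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if _, err := c.Do("HMSET", "user:1", "name", "alice", "visits", 42); err != nil {
		log.Fatal(err)
	}
	m, err := redis.StringMap(c.Do("HGETALL", "user:1"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m["name"], m["visits"]) // alice 42

	// IntMap additionally parses every value as an integer, so it suits
	// hashes that hold only counters.
	if _, err := c.Do("HMSET", "counters", "hits", 10, "misses", 2); err != nil {
		log.Fatal(err)
	}
	n, err := redis.IntMap(c.Do("HGETALL", "counters"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n["hits"] + n["misses"]) // 12
}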
+func Positions(result interface{}, err error) ([]*[2]float64, error) { + values, err := Values(result, err) + if err != nil { + return nil, err + } + positions := make([]*[2]float64, len(values)) + for i := range values { + if values[i] == nil { + continue + } + p, ok := values[i].([]interface{}) + if !ok { + return nil, fmt.Errorf("redigo: unexpected element type for interface slice, got type %T", values[i]) + } + if len(p) != 2 { + return nil, fmt.Errorf("redigo: unexpected number of values for a member position, got %d", len(p)) + } + lat, err := Float64(p[0], nil) + if err != nil { + return nil, err + } + long, err := Float64(p[1], nil) + if err != nil { + return nil, err + } + positions[i] = &[2]float64{lat, long} + } + return positions, nil +} diff --git a/vendor/github.com/gomodule/redigo/redis/scan.go b/vendor/github.com/gomodule/redigo/redis/scan.go new file mode 100644 index 0000000000000..ef9551bd476a6 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/scan.go @@ -0,0 +1,585 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "sync" +) + +func ensureLen(d reflect.Value, n int) { + if n > d.Cap() { + d.Set(reflect.MakeSlice(d.Type(), n, n)) + } else { + d.SetLen(n) + } +} + +func cannotConvert(d reflect.Value, s interface{}) error { + var sname string + switch s.(type) { + case string: + sname = "Redis simple string" + case Error: + sname = "Redis error" + case int64: + sname = "Redis integer" + case []byte: + sname = "Redis bulk string" + case []interface{}: + sname = "Redis array" + default: + sname = reflect.TypeOf(s).String() + } + return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) +} + +func convertAssignBulkString(d reflect.Value, s []byte) (err error) { + switch d.Type().Kind() { + case reflect.Float32, reflect.Float64: + var x float64 + x, err = strconv.ParseFloat(string(s), d.Type().Bits()) + d.SetFloat(x) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var x int64 + x, err = strconv.ParseInt(string(s), 10, d.Type().Bits()) + d.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(string(s), 10, d.Type().Bits()) + d.SetUint(x) + case reflect.Bool: + var x bool + x, err = strconv.ParseBool(string(s)) + d.SetBool(x) + case reflect.String: + d.SetString(string(s)) + case reflect.Slice: + if d.Type().Elem().Kind() != reflect.Uint8 { + err = cannotConvert(d, s) + } else { + d.SetBytes(s) + } + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignInt(d reflect.Value, s int64) (err error) { + switch d.Type().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + d.SetInt(s) + if d.Int() != s { + err = strconv.ErrRange + d.SetInt(0) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if s < 0 { + err = strconv.ErrRange + } 
else { + x := uint64(s) + d.SetUint(x) + if d.Uint() != x { + err = strconv.ErrRange + d.SetUint(0) + } + } + case reflect.Bool: + d.SetBool(s != 0) + default: + err = cannotConvert(d, s) + } + return +} + +func convertAssignValue(d reflect.Value, s interface{}) (err error) { + if d.Kind() != reflect.Ptr { + if d.CanAddr() { + d2 := d.Addr() + if d2.CanInterface() { + if scanner, ok := d2.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + } + } else if d.CanInterface() { + // Already a reflect.Ptr + if d.IsNil() { + d.Set(reflect.New(d.Type().Elem())) + } + if scanner, ok := d.Interface().(Scanner); ok { + return scanner.RedisScan(s) + } + } + + switch s := s.(type) { + case []byte: + err = convertAssignBulkString(d, s) + case int64: + err = convertAssignInt(d, s) + default: + err = cannotConvert(d, s) + } + return err +} + +func convertAssignArray(d reflect.Value, s []interface{}) error { + if d.Type().Kind() != reflect.Slice { + return cannotConvert(d, s) + } + ensureLen(d, len(s)) + for i := 0; i < len(s); i++ { + if err := convertAssignValue(d.Index(i), s[i]); err != nil { + return err + } + } + return nil +} + +func convertAssign(d interface{}, s interface{}) (err error) { + if scanner, ok := d.(Scanner); ok { + return scanner.RedisScan(s) + } + + // Handle the most common destination types using type switches and + // fall back to reflection for all other types. + switch s := s.(type) { + case nil: + // ignore + case []byte: + switch d := d.(type) { + case *string: + *d = string(s) + case *int: + *d, err = strconv.Atoi(string(s)) + case *bool: + *d, err = strconv.ParseBool(string(s)) + case *[]byte: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignBulkString(d.Elem(), s) + } + } + case int64: + switch d := d.(type) { + case *int: + x := int(s) + if int64(x) != s { + err = strconv.ErrRange + x = 0 + } + *d = x + case *bool: + *d = s != 0 + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignInt(d.Elem(), s) + } + } + case string: + switch d := d.(type) { + case *string: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + case []interface{}: + switch d := d.(type) { + case *[]interface{}: + *d = s + case *interface{}: + *d = s + case nil: + // skip value + default: + if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr { + err = cannotConvert(d, s) + } else { + err = convertAssignArray(d.Elem(), s) + } + } + case Error: + err = s + default: + err = cannotConvert(reflect.ValueOf(d), s) + } + return +} + +// Scan copies from src to the values pointed at by dest. +// +// Scan uses RedisScan if available otherwise: +// +// The values pointed at by dest must be an integer, float, boolean, string, +// []byte, interface{} or slices of these types. Scan uses the standard strconv +// package to convert bulk strings to numeric and boolean types. +// +// If a dest value is nil, then the corresponding src value is skipped. +// +// If a src element is nil, then the corresponding dest value is not modified. +// +// To enable easy use of Scan in a loop, Scan returns the slice of src +// following the copied values. 
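// A minimal sketch of the "Scan in a loop" pattern enabled by Scan returning
// the unconsumed tail of src. The key and the alternating field/count layout
// (an HGETALL-style reply) are assumptions for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/gomodule/redigo/redis"
)

func main() {
	c, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	if _, err := c.Do("HMSET", "pages", "home", 10, "about", 3); err != nil {
		log.Fatal(err)
	}

	values, err := redis.Values(c.Do("HGETALL", "pages"))
	if err != nil {
		log.Fatal(err)
	}

	// Consume the flat (field, value, field, value, ...) reply two items at a
	// time; each call to Scan returns whatever is left of the slice.
	for len(values) > 0 {
		var field string
		var count int
		values, err = redis.Scan(values, &field, &count)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(field, count)
	}
}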
+func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) { + if len(src) < len(dest) { + return nil, errors.New("redigo.Scan: array short") + } + var err error + for i, d := range dest { + err = convertAssign(d, src[i]) + if err != nil { + err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err) + break + } + } + return src[len(dest):], err +} + +type fieldSpec struct { + name string + index []int + omitEmpty bool +} + +type structSpec struct { + m map[string]*fieldSpec + l []*fieldSpec +} + +func (ss *structSpec) fieldSpec(name []byte) *fieldSpec { + return ss.m[string(name)] +} + +func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + switch { + case f.PkgPath != "" && !f.Anonymous: + // Ignore unexported fields. + case f.Anonymous: + // TODO: Handle pointers. Requires change to decoder and + // protection against infinite recursion. + if f.Type.Kind() == reflect.Struct { + compileStructSpec(f.Type, depth, append(index, i), ss) + } + default: + fs := &fieldSpec{name: f.Name} + tag := f.Tag.Get("redis") + p := strings.Split(tag, ",") + if len(p) > 0 { + if p[0] == "-" { + continue + } + if len(p[0]) > 0 { + fs.name = p[0] + } + for _, s := range p[1:] { + switch s { + case "omitempty": + fs.omitEmpty = true + default: + panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name())) + } + } + } + d, found := depth[fs.name] + if !found { + d = 1 << 30 + } + switch { + case len(index) == d: + // At same depth, remove from result. + delete(ss.m, fs.name) + j := 0 + for i := 0; i < len(ss.l); i++ { + if fs.name != ss.l[i].name { + ss.l[j] = ss.l[i] + j += 1 + } + } + ss.l = ss.l[:j] + case len(index) < d: + fs.index = make([]int, len(index)+1) + copy(fs.index, index) + fs.index[len(index)] = i + depth[fs.name] = len(index) + ss.m[fs.name] = fs + ss.l = append(ss.l, fs) + } + } + } +} + +var ( + structSpecMutex sync.RWMutex + structSpecCache = make(map[reflect.Type]*structSpec) + defaultFieldSpec = &fieldSpec{} +) + +func structSpecForType(t reflect.Type) *structSpec { + + structSpecMutex.RLock() + ss, found := structSpecCache[t] + structSpecMutex.RUnlock() + if found { + return ss + } + + structSpecMutex.Lock() + defer structSpecMutex.Unlock() + ss, found = structSpecCache[t] + if found { + return ss + } + + ss = &structSpec{m: make(map[string]*fieldSpec)} + compileStructSpec(t, make(map[string]int), nil, ss) + structSpecCache[t] = ss + return ss +} + +var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct") + +// ScanStruct scans alternating names and values from src to a struct. The +// HGETALL and CONFIG GET commands return replies in this format. +// +// ScanStruct uses exported field names to match values in the response. Use +// 'redis' field tag to override the name: +// +// Field int `redis:"myName"` +// +// Fields with the tag redis:"-" are ignored. +// +// Each field uses RedisScan if available otherwise: +// Integer, float, boolean, string and []byte fields are supported. Scan uses the +// standard strconv package to convert bulk string values to numeric and +// boolean types. +// +// If a src element is nil, then the corresponding field is not modified. 
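A hedged round-trip sketch of ScanStruct (defined just below) together with Args.AddFlat (defined later in this file): the struct, key and connection are placeholders, not part of the patch, and the redis tags behave as documented above.

package example

import (
	"github.com/gomodule/redigo/redis"
)

// user is a hypothetical struct; the redis tags map fields to hash field
// names and "-" excludes a field from scanning.
type user struct {
	Name  string `redis:"name"`
	Age   int    `redis:"age"`
	Notes string `redis:"-"`
}

// saveAndLoad writes u as a hash via Args.AddFlat and reads it back with
// ScanStruct. The connection and key are supplied by the caller.
func saveAndLoad(c redis.Conn, key string, u user) (user, error) {
	if _, err := c.Do("HMSET", redis.Args{}.Add(key).AddFlat(&u)...); err != nil {
		return user{}, err
	}
	v, err := redis.Values(c.Do("HGETALL", key))
	if err != nil {
		return user{}, err
	}
	var got user
	err = redis.ScanStruct(v, &got)
	return got, err
}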
+func ScanStruct(src []interface{}, dest interface{}) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanStructValue + } + d = d.Elem() + if d.Kind() != reflect.Struct { + return errScanStructValue + } + ss := structSpecForType(d.Type()) + + if len(src)%2 != 0 { + return errors.New("redigo.ScanStruct: number of values not a multiple of 2") + } + + for i := 0; i < len(src); i += 2 { + s := src[i+1] + if s == nil { + continue + } + name, ok := src[i].([]byte) + if !ok { + return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i) + } + fs := ss.fieldSpec(name) + if fs == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err) + } + } + return nil +} + +var ( + errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct") +) + +// ScanSlice scans src to the slice pointed to by dest. The elements the dest +// slice must be integer, float, boolean, string, struct or pointer to struct +// values. +// +// Struct fields must be integer, float, boolean or string values. All struct +// fields are used unless a subset is specified using fieldNames. +func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error { + d := reflect.ValueOf(dest) + if d.Kind() != reflect.Ptr || d.IsNil() { + return errScanSliceValue + } + d = d.Elem() + if d.Kind() != reflect.Slice { + return errScanSliceValue + } + + isPtr := false + t := d.Type().Elem() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + isPtr = true + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + ensureLen(d, len(src)) + for i, s := range src { + if s == nil { + continue + } + if err := convertAssignValue(d.Index(i), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err) + } + } + return nil + } + + ss := structSpecForType(t) + fss := ss.l + if len(fieldNames) > 0 { + fss = make([]*fieldSpec, len(fieldNames)) + for i, name := range fieldNames { + fss[i] = ss.m[name] + if fss[i] == nil { + return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name) + } + } + } + + if len(fss) == 0 { + return errors.New("redigo.ScanSlice: no struct fields") + } + + n := len(src) / len(fss) + if n*len(fss) != len(src) { + return errors.New("redigo.ScanSlice: length not a multiple of struct field count") + } + + ensureLen(d, n) + for i := 0; i < n; i++ { + d := d.Index(i) + if isPtr { + if d.IsNil() { + d.Set(reflect.New(t)) + } + d = d.Elem() + } + for j, fs := range fss { + s := src[i*len(fss)+j] + if s == nil { + continue + } + if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil { + return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err) + } + } + } + return nil +} + +// Args is a helper for constructing command arguments from structured values. +type Args []interface{} + +// Add returns the result of appending value to args. +func (args Args) Add(value ...interface{}) Args { + return append(args, value...) +} + +// AddFlat returns the result of appending the flattened value of v to args. +// +// Maps are flattened by appending the alternating keys and map values to args. +// +// Slices are flattened by appending the slice elements to args. +// +// Structs are flattened by appending the alternating names and values of +// exported fields to args. 
If v is a nil struct pointer, then nothing is +// appended. The 'redis' field tag overrides struct field names. See ScanStruct +// for more information on the use of the 'redis' field tag. +// +// Other types are appended to args as is. +func (args Args) AddFlat(v interface{}) Args { + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Struct: + args = flattenStruct(args, rv) + case reflect.Slice: + for i := 0; i < rv.Len(); i++ { + args = append(args, rv.Index(i).Interface()) + } + case reflect.Map: + for _, k := range rv.MapKeys() { + args = append(args, k.Interface(), rv.MapIndex(k).Interface()) + } + case reflect.Ptr: + if rv.Type().Elem().Kind() == reflect.Struct { + if !rv.IsNil() { + args = flattenStruct(args, rv.Elem()) + } + } else { + args = append(args, v) + } + default: + args = append(args, v) + } + return args +} + +func flattenStruct(args Args, v reflect.Value) Args { + ss := structSpecForType(v.Type()) + for _, fs := range ss.l { + fv := v.FieldByIndex(fs.index) + if fs.omitEmpty { + var empty = false + switch fv.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + empty = fv.Len() == 0 + case reflect.Bool: + empty = !fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + empty = fv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + empty = fv.Uint() == 0 + case reflect.Float32, reflect.Float64: + empty = fv.Float() == 0 + case reflect.Interface, reflect.Ptr: + empty = fv.IsNil() + } + if empty { + continue + } + } + args = append(args, fs.name, fv.Interface()) + } + return args +} diff --git a/vendor/github.com/gomodule/redigo/redis/script.go b/vendor/github.com/gomodule/redigo/redis/script.go new file mode 100644 index 0000000000000..0ef1c821f7144 --- /dev/null +++ b/vendor/github.com/gomodule/redigo/redis/script.go @@ -0,0 +1,91 @@ +// Copyright 2012 Gary Burd +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package redis + +import ( + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +// Script encapsulates the source, hash and key count for a Lua script. See +// http://redis.io/commands/eval for information on scripts in Redis. +type Script struct { + keyCount int + src string + hash string +} + +// NewScript returns a new script object. If keyCount is greater than or equal +// to zero, then the count is automatically inserted in the EVAL command +// argument list. If keyCount is less than zero, then the application supplies +// the count as the first value in the keysAndArgs argument to the Do, Send and +// SendHash methods. 
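A minimal sketch of the Script type described above; the Lua body, key count and key names are illustrative assumptions rather than anything defined by this patch.

package example

import (
	"github.com/gomodule/redigo/redis"
)

// getAndSetScript is an illustrative one-key Lua script: it returns the old
// value of KEYS[1] and stores ARGV[1] in its place.
var getAndSetScript = redis.NewScript(1, `
local v = redis.call("GET", KEYS[1])
redis.call("SET", KEYS[1], ARGV[1])
return v
`)

// getAndSet relies on Script.Do, which tries EVALSHA first and falls back to
// EVAL (loading the script) when the server replies with a NOSCRIPT error.
// If the key was unset, the nil reply surfaces as redis.ErrNil here.
func getAndSet(c redis.Conn, key, value string) (string, error) {
	return redis.String(getAndSetScript.Do(c, key, value))
}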
+func NewScript(keyCount int, src string) *Script { + h := sha1.New() + io.WriteString(h, src) + return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} +} + +func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { + var args []interface{} + if s.keyCount < 0 { + args = make([]interface{}, 1+len(keysAndArgs)) + args[0] = spec + copy(args[1:], keysAndArgs) + } else { + args = make([]interface{}, 2+len(keysAndArgs)) + args[0] = spec + args[1] = s.keyCount + copy(args[2:], keysAndArgs) + } + return args +} + +// Hash returns the script hash. +func (s *Script) Hash() string { + return s.hash +} + +// Do evaluates the script. Under the covers, Do optimistically evaluates the +// script using the EVALSHA command. If the command fails because the script is +// not loaded, then Do evaluates the script using the EVAL command (thus +// causing the script to load). +func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) { + v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...) + if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") { + v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...) + } + return v, err +} + +// SendHash evaluates the script without waiting for the reply. The script is +// evaluated with the EVALSHA command. The application must ensure that the +// script is loaded by a previous call to Send, Do or Load methods. +func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) +} + +// Send evaluates the script without waiting for the reply. +func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { + return c.Send("EVAL", s.args(s.src, keysAndArgs)...) +} + +// Load loads the script without evaluating it. +func (s *Script) Load(c Conn) error { + _, err := c.Do("SCRIPT", "LOAD", s.src) + return err +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index 8e473d0fe92d2..0327865eeec70 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -14,9 +14,10 @@ package expfmt import ( - "bytes" + "bufio" "fmt" "io" + "io/ioutil" "math" "strconv" "strings" @@ -27,7 +28,7 @@ import ( dto "github.com/prometheus/client_model/go" ) -// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer +// enhancedWriter has all the enhanced write functions needed here. bufio.Writer // implements it. type enhancedWriter interface { io.Writer @@ -37,14 +38,13 @@ type enhancedWriter interface { } const ( - initialBufSize = 512 initialNumBufSize = 24 ) var ( bufPool = sync.Pool{ New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, initialBufSize)) + return bufio.NewWriter(ioutil.Discard) }, } numBufPool = sync.Pool{ @@ -75,16 +75,14 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e } // Try the interface upgrade. If it doesn't work, we'll use a - // bytes.Buffer from the sync.Pool and write out its content to out in a - // single go in the end. + // bufio.Writer from the sync.Pool. 
w, ok := out.(enhancedWriter) if !ok { - b := bufPool.Get().(*bytes.Buffer) - b.Reset() + b := bufPool.Get().(*bufio.Writer) + b.Reset(out) w = b defer func() { - bWritten, bErr := out.Write(b.Bytes()) - written = bWritten + bErr := b.Flush() if err == nil { err = bErr } diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index ec3d86ba7ce12..342e5940d0f7d 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -325,7 +325,7 @@ func (p *TextParser) startLabelValue() stateFn { // - Other labels have to be added to currentLabels for signature calculation. if p.currentMF.GetType() == dto.MetricType_SUMMARY { if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -337,7 +337,7 @@ func (p *TextParser) startLabelValue() stateFn { // Similar special treatment of histograms. if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) return nil @@ -392,7 +392,7 @@ func (p *TextParser) readingValue() stateFn { if p.readTokenUntilWhitespace(); p.err != nil { return nil // Unexpected end of input. } - value, err := strconv.ParseFloat(p.currentToken.String(), 64) + value, err := parseFloat(p.currentToken.String()) if err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) @@ -755,3 +755,10 @@ func histogramMetricName(name string) string { return name } } + +func parseFloat(s string) (float64, error) { + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index 439d76839b82e..973be2809ba72 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -196,9 +196,11 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { // nodeAddresses returns the provided node's address, based on the priority: // 1. NodeInternalIP -// 2. NodeExternalIP -// 3. NodeLegacyHostIP -// 3. NodeHostName +// 2. NodeInternalDNS +// 3. NodeExternalIP +// 4. NodeExternalDNS +// 5. NodeLegacyHostIP +// 6. 
NodeHostName // // Derived from k8s.io/kubernetes/pkg/util/node/node.go func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, error) { @@ -210,9 +212,15 @@ func nodeAddress(node *apiv1.Node) (string, map[apiv1.NodeAddressType][]string, if addresses, ok := m[apiv1.NodeInternalIP]; ok { return addresses[0], m, nil } + if addresses, ok := m[apiv1.NodeInternalDNS]; ok { + return addresses[0], m, nil + } if addresses, ok := m[apiv1.NodeExternalIP]; ok { return addresses[0], m, nil } + if addresses, ok := m[apiv1.NodeExternalDNS]; ok { + return addresses[0], m, nil + } if addresses, ok := m[apiv1.NodeAddressType(NodeLegacyHostIP)]; ok { return addresses[0], m, nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 1dbdecc8d0fef..4625e42a3178a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -191,6 +191,9 @@ func (m *Manager) ApplyConfig(cfg map[string]sd_config.ServiceDiscoveryConfig) e } } m.cancelDiscoverers() + m.targets = make(map[poolKey]map[string]*targetgroup.Group) + m.providers = nil + m.discoverCancel = nil for name, scfg := range cfg { m.registerProviders(scfg, name) discoveredTargets.WithLabelValues(m.name, name).Set(0) @@ -280,9 +283,6 @@ func (m *Manager) cancelDiscoverers() { for _, c := range m.discoverCancel { c() } - m.targets = make(map[poolKey]map[string]*targetgroup.Group) - m.providers = nil - m.discoverCancel = nil } func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { diff --git a/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go b/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go index 3d4fddb75b413..6cb9d583c6cd8 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go +++ b/vendor/github.com/prometheus/prometheus/pkg/gate/gate.go @@ -20,7 +20,7 @@ type Gate struct { ch chan struct{} } -// NewGate returns a query gate that limits the number of queries +// New returns a query gate that limits the number of queries // being concurrently executed. func New(length int) *Gate { return &Gate{ diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go index a1b0d6977e804..ed76bc39578ec 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/openmetricsparse.go @@ -20,7 +20,6 @@ import ( "io" "math" "sort" - "strconv" "strings" "unicode/utf8" @@ -44,7 +43,10 @@ func (l *openMetricsLexer) buf() []byte { } func (l *openMetricsLexer) cur() byte { - return l.b[l.i] + if l.i < len(l.b) { + return l.b[l.i] + } + return byte(' ') } // next advances the openMetricsLexer to the next character. @@ -85,7 +87,7 @@ type OpenMetricsParser struct { offsets []int } -// New returns a new parser of the byte slice. +// NewOpenMetricsParser returns a new parser of the byte slice. func NewOpenMetricsParser(b []byte) Parser { return &OpenMetricsParser{l: &openMetricsLexer{b: b}} } @@ -265,7 +267,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { if t2 != tValue { return EntryInvalid, parseError("expected value after metric", t) } - if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()[1:]), 64); err != nil { + if p.val, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { return EntryInvalid, err } // Ensure canonical NaN value. 
@@ -280,7 +282,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.hasTS = true var ts float64 // A float is enough to hold what we need for millisecond resolution. - if ts, err = strconv.ParseFloat(yoloString(p.l.buf()[1:]), 64); err != nil { + if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil { return EntryInvalid, err } p.ts = int64(ts * 1000) diff --git a/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go index f05b87abd3bea..69fa51c3dddc8 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/pkg/textparse/promparse.go @@ -153,7 +153,7 @@ type PromParser struct { offsets []int } -// New returns a new parser of the byte slice. +// NewPromParser returns a new parser of the byte slice. func NewPromParser(b []byte) Parser { return &PromParser{l: &promlexer{b: append(b, '\n')}} } @@ -332,7 +332,7 @@ func (p *PromParser) Next() (Entry, error) { if t2 != tValue { return EntryInvalid, parseError("expected value after metric", t) } - if p.val, err = strconv.ParseFloat(yoloString(p.l.buf()), 64); err != nil { + if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil { return EntryInvalid, err } // Ensure canonical NaN value. @@ -409,3 +409,11 @@ var helpReplacer = strings.NewReplacer( func yoloString(b []byte) string { return *((*string)(unsafe.Pointer(&b))) } + +func parseFloat(s string) (float64, error) { + // Keep to pre-Go 1.13 float formats. + if strings.ContainsAny(s, "pP_") { + return 0, fmt.Errorf("unsupported character in float") + } + return strconv.ParseFloat(s, 64) +} diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 0687818a2fe91..256c3d775c448 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -15,14 +15,15 @@ package promql import ( "encoding/json" - "github.com/edsrzf/mmap-go" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "os" "path/filepath" "strings" "time" "unicode/utf8" + + "github.com/edsrzf/mmap-go" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" ) type ActiveQueryTracker struct { @@ -75,27 +76,27 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { } } -func getMMapedFile(filename string, filesize int, logger log.Logger) (error, []byte) { +func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) if err != nil { level.Error(logger).Log("msg", "Error opening query log file", "file", filename, "err", err) - return err, []byte{} + return nil, err } err = file.Truncate(int64(filesize)) if err != nil { level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) - return err, []byte{} + return nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) - return err, []byte{} + return nil, err } - return err, fileAsBytes + return fileAsBytes, err } func NewActiveQueryTracker(localStoragePath string, maxQueries int, logger log.Logger) *ActiveQueryTracker { @@ -107,7 +108,7 @@ func NewActiveQueryTracker(localStoragePath string, maxQueries int, logger log.L filename, filesize := 
filepath.Join(localStoragePath, "queries.active"), 1+maxQueries*entrySize logUnfinishedQueries(filename, filesize, logger) - err, fileAsBytes := getMMapedFile(filename, filesize, logger) + fileAsBytes, err := getMMapedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } @@ -130,7 +131,7 @@ func trimStringByBytes(str string, size int) string { trimIndex := len(bytesStr) if size < len(bytesStr) { for !utf8.RuneStart(bytesStr[size]) { - size -= 1 + size-- } trimIndex = size } diff --git a/vendor/github.com/prometheus/prometheus/template/template.go b/vendor/github.com/prometheus/prometheus/template/template.go index 031bdbd8b8d78..0a31c4e54a4a3 100644 --- a/vendor/github.com/prometheus/prometheus/template/template.go +++ b/vendor/github.com/prometheus/prometheus/template/template.go @@ -219,7 +219,7 @@ func NewTemplateExpander( seconds := int64(v) % 60 minutes := (int64(v) / 60) % 60 hours := (int64(v) / 60 / 60) % 24 - days := (int64(v) / 60 / 60 / 24) + days := int64(v) / 60 / 60 / 24 // For days to minutes, we display seconds as an integer. if days != 0 { return fmt.Sprintf("%s%dd %dh %dm %ds", sign, days, hours, minutes, seconds) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md index 12364b09fc15f..8f23acc190720 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/CHANGELOG.md @@ -78,7 +78,7 @@ - [ENHANCEMENT] When closing the db any running compaction will be cancelled so it doesn't block. - `NewLeveledCompactor` takes a context. - [CHANGE] `prometheus_tsdb_storage_blocks_bytes_total` is now `prometheus_tsdb_storage_blocks_bytes`. - - [BUGFIX] Improved Postings Merge performance. Fixes a regression from the the previous release. + - [BUGFIX] Improved Postings Merge performance. Fixes a regression from the previous release. - [BUGFIX] LiveReader can get into an infinite loop on corrupt WALs. ## 0.4.0 @@ -91,7 +91,7 @@ - `OpenBlock` signature changed to take a logger. - [REMOVED] `PrefixMatcher` is considered unused so was removed. - [CLEANUP] `Options.WALFlushInterval` is removed as it wasn't used anywhere. - - [FEATURE] Add new `LiveReader` to WAL pacakge. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read. + - [FEATURE] Add new `LiveReader` to WAL package. Added to allow live tailing of a WAL segment, used by Prometheus Remote Write after refactor. The main difference between the new reader and the existing `Reader` is that for `LiveReader` a call to `Next()` that returns false does not mean that there will never be more data to read. 
## 0.3.1 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index b7771d6bbc35a..0030fbd6854cb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -201,6 +201,7 @@ type BlockMetaCompaction struct { const indexFilename = "index" const metaFilename = "meta.json" +const metaVersion1 = 1 func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } @@ -214,7 +215,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { if err := json.Unmarshal(b, &m); err != nil { return nil, 0, err } - if m.Version != 1 { + if m.Version != metaVersion1 { return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version) } @@ -222,7 +223,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { } func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { - meta.Version = 1 + meta.Version = metaVersion1 // Make any changes to the file appear atomic. path := filepath.Join(dir, metaFilename) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index c80aa07043454..e29aedf5bace2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -286,6 +286,49 @@ func OpenDBReadOnly(dir string, l log.Logger) (*DBReadOnly, error) { }, nil } +// FlushWAL creates a new block containing all data that's currently in the memory buffer/WAL. +// Samples that are in existing blocks will not be written to the new block. +// Note that if the read only database is running concurrently with a +// writable database then writing the WAL to the database directory can race. +func (db *DBReadOnly) FlushWAL(dir string) error { + blockReaders, err := db.Blocks() + if err != nil { + return errors.Wrap(err, "read blocks") + } + maxBlockTime := int64(math.MinInt64) + if len(blockReaders) > 0 { + maxBlockTime = blockReaders[len(blockReaders)-1].Meta().MaxTime + } + w, err := wal.Open(db.logger, nil, filepath.Join(db.dir, "wal")) + if err != nil { + return err + } + head, err := NewHead(nil, db.logger, w, 1) + if err != nil { + return err + } + // Set the min valid time for the ingested wal samples + // to be no lower than the maxt of the last block. + if err := head.Init(maxBlockTime); err != nil { + return errors.Wrap(err, "read WAL") + } + mint := head.MinTime() + maxt := head.MaxTime() + rh := &rangeHead{ + head: head, + mint: mint, + maxt: maxt, + } + compactor, err := NewLeveledCompactor(context.Background(), nil, db.logger, DefaultOptions.BlockRanges, chunkenc.NewPool()) + if err != nil { + return errors.Wrap(err, "create leveled compactor") + } + // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). + // Because of this block intervals are always +1 than the total samples it includes. + _, err = compactor.Write(dir, rh, mint, maxt+1, nil) + return errors.Wrap(err, "writing WAL") +} + // Querier loads the wal and returns a new querier over the data partition for the given time range. // Current implementation doesn't support multiple Queriers. 
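The DBReadOnly.FlushWAL method added in the hunk above can be exercised roughly as follows. This is a hedged sketch under stated assumptions, not part of the patch: the data directory is a placeholder, the nop logger stands in for a real one, and it assumes no writable Prometheus instance is using the same directory at the time (as the method's own comment warns).

package example

import (
	"log"

	kitlog "github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/tsdb"
)

// flushWAL opens a TSDB directory read-only and compacts whatever is still
// only in the WAL into a persisted block under the same directory.
func flushWAL(dataDir string) {
	db, err := tsdb.OpenDBReadOnly(dataDir, kitlog.NewNopLogger())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.FlushWAL(dataDir); err != nil {
		log.Fatal(err)
	}
}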
func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) { @@ -294,12 +337,12 @@ func (db *DBReadOnly) Querier(mint, maxt int64) (Querier, error) { return nil, ErrClosed default: } - blocksReaders, err := db.Blocks() + blockReaders, err := db.Blocks() if err != nil { return nil, err } - blocks := make([]*Block, len(blocksReaders)) - for i, b := range blocksReaders { + blocks := make([]*Block, len(blockReaders)) + for i, b := range blockReaders { b, ok := b.(*Block) if !ok { return nil, errors.New("unable to convert a read only block to a normal block") @@ -380,7 +423,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } if len(loadable) == 0 { - return nil, errors.New("no blocks found") + return nil, nil } sort.Slice(loadable, func(i, j int) bool { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 219514ffe1fc8..6c9975a136285 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -1046,7 +1046,7 @@ func (h *Head) gc() { h.metrics.seriesRemoved.Add(float64(seriesRemoved)) h.metrics.chunksRemoved.Add(float64(chunksRemoved)) h.metrics.chunks.Sub(float64(chunksRemoved)) - // Using AddUint64 to substract series removed. + // Using AddUint64 to subtract series removed. // See: https://golang.org/pkg/sync/atomic/#AddUint64. atomic.AddUint64(&h.numSeries, ^uint64(seriesRemoved-1)) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index aba0f4dd1d38b..357232e46150d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -328,6 +328,7 @@ func findSetMatches(pattern string) []string { func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, error) { var its, notIts []index.Postings // See which label must be non-empty. + // Optimization for case like {l=~".", l!="1"}. labelMustBeSet := make(map[string]bool, len(ms)) for _, m := range ms { if !m.Matches("") { @@ -336,9 +337,9 @@ func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, } for _, m := range ms { - matchesEmpty := m.Matches("") - if labelMustBeSet[m.Name()] || !matchesEmpty { + if labelMustBeSet[m.Name()] { // If this matcher must be non-empty, we can be smarter. + matchesEmpty := m.Matches("") nm, isNot := m.(*labels.NotMatcher) if isNot && matchesEmpty { // l!="foo" // If the label can't be empty and is a Not and the inner matcher @@ -444,7 +445,7 @@ func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error return index.Merge(rit...), nil } -// inversePostingsForMatcher eeturns the postings for the series with the label name set but not matching the matcher. +// inversePostingsForMatcher returns the postings for the series with the label name set but not matching the matcher. 
func inversePostingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) { tpls, err := ix.LabelValues(m.Name()) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 494f005e4222b..0df612d3d48c2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -55,7 +55,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { if err != nil { return wrapErr(err, d) } - if meta.Version == 1 { + if meta.Version == metaVersion1 { level.Info(logger).Log( "msg", "found healthy block", "mint", meta.MinTime, @@ -108,7 +108,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { return wrapErr(err, d) } // Reset version of meta.json to 1. - meta.Version = 1 + meta.Version = metaVersion1 if _, err := writeMetaFile(logger, d, meta); err != nil { return wrapErr(err, d) } @@ -126,7 +126,7 @@ func readBogusMetaFile(dir string) (*BlockMeta, error) { if err := json.Unmarshal(b, &m); err != nil { return nil, err } - if m.Version != 1 && m.Version != 2 { + if m.Version != metaVersion1 && m.Version != 2 { return nil, errors.Errorf("unexpected meta file version %d", m.Version) } return &m, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 4809889f32b85..187feabd9df4a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -280,7 +280,7 @@ func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error { return err } -// Reader returns a new reader over the the write ahead log data. +// Reader returns a new reader over the write ahead log data. // It must be completely consumed before writing to the WAL. func (w *SegmentWAL) Reader() WALReader { return &repairingWALReader{ diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go index 82081e321a0d7..6d64bb37ef922 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -64,6 +64,14 @@ func (p *page) full() bool { return pageSize-p.alloc < recordHeaderSize } +func (p *page) reset() { + for i := range p.buf { + p.buf[i] = 0 + } + p.alloc = 0 + p.flushed = 0 +} + // Segment represents a segment file. type Segment struct { *os.File @@ -205,15 +213,16 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } registerMetrics(reg, w) - _, j, err := w.Segments() - // Index of the Segment we want to open and write to. - writeSegmentIndex := 0 + _, last, err := w.Segments() if err != nil { return nil, errors.Wrap(err, "get segment range") } + + // Index of the Segment we want to open and write to. + writeSegmentIndex := 0 // If some segments already exist create one with a higher index than the last segment. - if j != -1 { - writeSegmentIndex = j + 1 + if last != -1 { + writeSegmentIndex = last + 1 } segment, err := CreateSegment(w.dir, writeSegmentIndex) @@ -401,7 +410,7 @@ func (w *WAL) Repair(origErr error) error { return errors.Wrap(err, "delete corrupted segment") } - // Explicitly close the the segment we just repaired to avoid issues with Windows. + // Explicitly close the segment we just repaired to avoid issues with Windows. 
s.Close() // We always want to start writing to a new Segment rather than an existing @@ -493,11 +502,7 @@ func (w *WAL) flushPage(clear bool) error { // We flushed an entire page, prepare a new one. if clear { - for i := range p.buf { - p.buf[i] = 0 - } - p.alloc = 0 - p.flushed = 0 + p.reset() w.donePages++ w.pageCompletions.Inc() } diff --git a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go index 181bdde3b944a..3fea3921fa661 100644 --- a/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go +++ b/vendor/github.com/prometheus/prometheus/util/stats/query_stats.go @@ -54,7 +54,7 @@ func (s QueryTiming) String() string { } } -// Return a string representation of a QueryTiming span operation. +// SpanOperation returns a string representation of a QueryTiming span operation. func (s QueryTiming) SpanOperation() string { switch s { case EvalTotalTime: diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go index d3c9c926f1218..5f1c31554ce21 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go @@ -14,8 +14,13 @@ package testutil import ( + "crypto/sha256" + "io" "io/ioutil" "os" + "path/filepath" + "strconv" + "testing" ) const ( @@ -127,3 +132,51 @@ func NewTemporaryDirectory(name string, t T) (handler TemporaryDirectory) { return } + +// DirSize returns the size in bytes of all files in a directory. +func DirSize(t *testing.T, path string) int64 { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + Ok(t, err) + if !info.IsDir() { + size += info.Size() + } + return nil + }) + Ok(t, err) + return size +} + +// DirHash returns a hash of all files attribites and their content within a directory. +func DirHash(t *testing.T, path string) []byte { + hash := sha256.New() + err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + Ok(t, err) + + if info.IsDir() { + return nil + } + f, err := os.Open(path) + Ok(t, err) + defer f.Close() + + _, err = io.Copy(hash, f) + Ok(t, err) + + _, err = io.WriteString(hash, strconv.Itoa(int(info.Size()))) + Ok(t, err) + + _, err = io.WriteString(hash, info.Name()) + Ok(t, err) + + modTime, err := info.ModTime().GobEncode() + Ok(t, err) + + _, err = io.WriteString(hash, string(modTime)) + Ok(t, err) + return nil + }) + Ok(t, err) + + return hash.Sum(nil) +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go new file mode 100644 index 0000000000000..839b86690be76 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/testutil/logging.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testutil + +import ( + "testing" + + "github.com/go-kit/kit/log" +) + +type logger struct { + t *testing.T +} + +// NewLogger returns a gokit compatible Logger which calls t.Log. +func NewLogger(t *testing.T) log.Logger { + return logger{t: t} +} + +// Log implements log.Logger. +func (t logger) Log(keyvals ...interface{}) error { + t.t.Log(keyvals...) + return nil +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go index 52abce5bf6c4c..39b44e53617e9 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/testing.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/testing.go @@ -52,10 +52,11 @@ func Ok(tb TB, err error) { } // NotOk fails the test if an err is nil. -func NotOk(tb TB, err error, format string, a ...interface{}) { +func NotOk(tb TB, err error, a ...interface{}) { tb.Helper() if err == nil { if len(a) != 0 { + format := a[0].(string) tb.Fatalf("\033[31m"+format+": expected error, got none\033[39m", a...) } tb.Fatalf("\033[31mexpected error, got none\033[39m") @@ -76,7 +77,7 @@ func formatMessage(msgAndArgs []interface{}) string { } if msg, ok := msgAndArgs[0].(string); ok { - return fmt.Sprintf("\nmsg: "+msg, msgAndArgs[1:]...) + return fmt.Sprintf("\n\nmsg: "+msg, msgAndArgs[1:]...) } return "" } diff --git a/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go deleted file mode 100644 index 0a02a730358ca..0000000000000 --- a/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The code in this file was largely written by Damian Gryski as part of -// https://github.com/dgryski/go-tsz and published under the license below. -// It received minor modifications to suit Prometheus's needs. - -// Copyright (c) 2015,2016 Damian Gryski -// All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: - -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package chunkenc - -import "io" - -// bstream is a stream of bits. -type bstream struct { - stream []byte // the data stream - count uint8 // how many bits are valid in current byte -} - -func newBReader(b []byte) bstream { - return bstream{stream: b, count: 8} -} - -func (b *bstream) bytes() []byte { - return b.stream -} - -type bit bool - -const ( - zero bit = false - one bit = true -) - -func (b *bstream) writeBit(bit bit) { - if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 - } - - i := len(b.stream) - 1 - - if bit { - b.stream[i] |= 1 << (b.count - 1) - } - - b.count-- -} - -func (b *bstream) writeByte(byt byte) { - if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 - } - - i := len(b.stream) - 1 - - // fill up b.b with b.count bits from byt - b.stream[i] |= byt >> (8 - b.count) - - b.stream = append(b.stream, 0) - i++ - b.stream[i] = byt << b.count -} - -func (b *bstream) writeBits(u uint64, nbits int) { - u <<= (64 - uint(nbits)) - for nbits >= 8 { - byt := byte(u >> 56) - b.writeByte(byt) - u <<= 8 - nbits -= 8 - } - - for nbits > 0 { - b.writeBit((u >> 63) == 1) - u <<= 1 - nbits-- - } -} - -func (b *bstream) readBit() (bit, error) { - if len(b.stream) == 0 { - return false, io.EOF - } - - if b.count == 0 { - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return false, io.EOF - } - b.count = 8 - } - - d := (b.stream[0] << (8 - b.count)) & 0x80 - b.count-- - return d != 0, nil -} - -func (b *bstream) ReadByte() (byte, error) { - return b.readByte() -} - -func (b *bstream) readByte() (byte, error) { - if len(b.stream) == 0 { - return 0, io.EOF - } - - if b.count == 0 { - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - return b.stream[0], nil - } - - if b.count == 8 { - b.count = 0 - return b.stream[0], nil - } - - byt := b.stream[0] << (8 - b.count) - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - - // We just advanced the stream and can assume the shift to be 0. 
- byt |= b.stream[0] >> b.count - - return byt, nil -} - -func (b *bstream) readBits(nbits int) (uint64, error) { - var u uint64 - - for nbits >= 8 { - byt, err := b.readByte() - if err != nil { - return 0, err - } - - u = (u << 8) | uint64(byt) - nbits -= 8 - } - - if nbits == 0 { - return u, nil - } - - if nbits > int(b.count) { - u = (u << uint(b.count)) | uint64((b.stream[0]<<(8-b.count))>>(8-b.count)) - nbits -= int(b.count) - b.stream = b.stream[1:] - - if len(b.stream) == 0 { - return 0, io.EOF - } - b.count = 8 - } - - u = (u << uint(nbits)) | uint64((b.stream[0]<<(8-b.count))>>(8-uint(nbits))) - b.count -= uint8(nbits) - return u, nil -} diff --git a/vendor/github.com/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/tsdb/chunkenc/chunk.go deleted file mode 100644 index dc566606d5580..0000000000000 --- a/vendor/github.com/prometheus/tsdb/chunkenc/chunk.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package chunkenc - -import ( - "fmt" - "sync" - - "github.com/pkg/errors" -) - -// Encoding is the identifier for a chunk encoding. -type Encoding uint8 - -func (e Encoding) String() string { - switch e { - case EncNone: - return "none" - case EncXOR: - return "XOR" - } - return "" -} - -// The different available chunk encodings. -const ( - EncNone Encoding = iota - EncXOR -) - -// Chunk holds a sequence of sample pairs that can be iterated over and appended to. -type Chunk interface { - Bytes() []byte - Encoding() Encoding - Appender() (Appender, error) - Iterator() Iterator - NumSamples() int -} - -// Appender adds sample pairs to a chunk. -type Appender interface { - Append(int64, float64) -} - -// Iterator is a simple iterator that can only get the next value. -type Iterator interface { - At() (int64, float64) - Err() error - Next() bool -} - -// NewNopIterator returns a new chunk iterator that does not hold any data. -func NewNopIterator() Iterator { - return nopIterator{} -} - -type nopIterator struct{} - -func (nopIterator) At() (int64, float64) { return 0, 0 } -func (nopIterator) Next() bool { return false } -func (nopIterator) Err() error { return nil } - -// Pool is used to create and reuse chunk references to avoid allocations. -type Pool interface { - Put(Chunk) error - Get(e Encoding, b []byte) (Chunk, error) -} - -// pool is a memory pool of chunk objects. -type pool struct { - xor sync.Pool -} - -// NewPool returns a new pool. -func NewPool() Pool { - return &pool{ - xor: sync.Pool{ - New: func() interface{} { - return &XORChunk{b: bstream{}} - }, - }, - } -} - -func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { - switch e { - case EncXOR: - c := p.xor.Get().(*XORChunk) - c.b.stream = b - c.b.count = 0 - return c, nil - } - return nil, errors.Errorf("invalid encoding %q", e) -} - -func (p *pool) Put(c Chunk) error { - switch c.Encoding() { - case EncXOR: - xc, ok := c.(*XORChunk) - // This may happen often with wrapped chunks. 
Nothing we can really do about - // it but returning an error would cause a lot of allocations again. Thus, - // we just skip it. - if !ok { - return nil - } - xc.b.stream = nil - xc.b.count = 0 - p.xor.Put(c) - default: - return errors.Errorf("invalid encoding %q", c.Encoding()) - } - return nil -} - -// FromData returns a chunk from a byte slice of chunk data. -// This is there so that users of the library can easily create chunks from -// bytes. -func FromData(e Encoding, d []byte) (Chunk, error) { - switch e { - case EncXOR: - return &XORChunk{b: bstream{count: 0, stream: d}}, nil - } - return nil, fmt.Errorf("unknown chunk encoding: %d", e) -} diff --git a/vendor/github.com/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/tsdb/chunkenc/xor.go deleted file mode 100644 index 1518772b3c42e..0000000000000 --- a/vendor/github.com/prometheus/tsdb/chunkenc/xor.go +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The code in this file was largely written by Damian Gryski as part of -// https://github.com/dgryski/go-tsz and published under the license below. -// It was modified to accommodate reading from byte slices without modifying -// the underlying bytes, which would panic when reading from mmaped -// read-only byte slices. - -// Copyright (c) 2015,2016 Damian Gryski -// All rights reserved. - -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: - -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package chunkenc - -import ( - "encoding/binary" - "math" - "math/bits" -) - -// XORChunk holds XOR encoded sample data. -type XORChunk struct { - b bstream -} - -// NewXORChunk returns a new chunk with XOR encoding of the given size. 
-func NewXORChunk() *XORChunk { - b := make([]byte, 2, 128) - return &XORChunk{b: bstream{stream: b, count: 0}} -} - -// Encoding returns the encoding type. -func (c *XORChunk) Encoding() Encoding { - return EncXOR -} - -// Bytes returns the underlying byte slice of the chunk. -func (c *XORChunk) Bytes() []byte { - return c.b.bytes() -} - -// NumSamples returns the number of samples in the chunk. -func (c *XORChunk) NumSamples() int { - return int(binary.BigEndian.Uint16(c.Bytes())) -} - -// Appender implements the Chunk interface. -func (c *XORChunk) Appender() (Appender, error) { - it := c.iterator() - - // To get an appender we must know the state it would have if we had - // appended all existing data from scratch. - // We iterate through the end and populate via the iterator's state. - for it.Next() { - } - if err := it.Err(); err != nil { - return nil, err - } - - a := &xorAppender{ - b: &c.b, - t: it.t, - v: it.val, - tDelta: it.tDelta, - leading: it.leading, - trailing: it.trailing, - } - if binary.BigEndian.Uint16(a.b.bytes()) == 0 { - a.leading = 0xff - } - return a, nil -} - -func (c *XORChunk) iterator() *xorIterator { - // Should iterators guarantee to act on a copy of the data so it doesn't lock append? - // When using striped locks to guard access to chunks, probably yes. - // Could only copy data if the chunk is not completed yet. - return &xorIterator{ - br: newBReader(c.b.bytes()[2:]), - numTotal: binary.BigEndian.Uint16(c.b.bytes()), - } -} - -// Iterator implements the Chunk interface. -func (c *XORChunk) Iterator() Iterator { - return c.iterator() -} - -type xorAppender struct { - b *bstream - - t int64 - v float64 - tDelta uint64 - - leading uint8 - trailing uint8 -} - -func (a *xorAppender) Append(t int64, v float64) { - var tDelta uint64 - num := binary.BigEndian.Uint16(a.b.bytes()) - - if num == 0 { - buf := make([]byte, binary.MaxVarintLen64) - for _, b := range buf[:binary.PutVarint(buf, t)] { - a.b.writeByte(b) - } - a.b.writeBits(math.Float64bits(v), 64) - - } else if num == 1 { - tDelta = uint64(t - a.t) - - buf := make([]byte, binary.MaxVarintLen64) - for _, b := range buf[:binary.PutUvarint(buf, tDelta)] { - a.b.writeByte(b) - } - - a.writeVDelta(v) - - } else { - tDelta = uint64(t - a.t) - dod := int64(tDelta - a.tDelta) - - // Gorilla has a max resolution of seconds, Prometheus milliseconds. - // Thus we use higher value range steps with larger bit size. - switch { - case dod == 0: - a.b.writeBit(zero) - case bitRange(dod, 14): - a.b.writeBits(0x02, 2) // '10' - a.b.writeBits(uint64(dod), 14) - case bitRange(dod, 17): - a.b.writeBits(0x06, 3) // '110' - a.b.writeBits(uint64(dod), 17) - case bitRange(dod, 20): - a.b.writeBits(0x0e, 4) // '1110' - a.b.writeBits(uint64(dod), 20) - default: - a.b.writeBits(0x0f, 4) // '1111' - a.b.writeBits(uint64(dod), 64) - } - - a.writeVDelta(v) - } - - a.t = t - a.v = v - binary.BigEndian.PutUint16(a.b.bytes(), num+1) - a.tDelta = tDelta -} - -func bitRange(x int64, nbits uint8) bool { - return -((1<<(nbits-1))-1) <= x && x <= 1<<(nbits-1) -} - -func (a *xorAppender) writeVDelta(v float64) { - vDelta := math.Float64bits(v) ^ math.Float64bits(a.v) - - if vDelta == 0 { - a.b.writeBit(zero) - return - } - a.b.writeBit(one) - - leading := uint8(bits.LeadingZeros64(vDelta)) - trailing := uint8(bits.TrailingZeros64(vDelta)) - - // Clamp number of leading zeros to avoid overflow when encoding. 
- if leading >= 32 { - leading = 31 - } - - if a.leading != 0xff && leading >= a.leading && trailing >= a.trailing { - a.b.writeBit(zero) - a.b.writeBits(vDelta>>a.trailing, 64-int(a.leading)-int(a.trailing)) - } else { - a.leading, a.trailing = leading, trailing - - a.b.writeBit(one) - a.b.writeBits(uint64(leading), 5) - - // Note that if leading == trailing == 0, then sigbits == 64. But that value doesn't actually fit into the 6 bits we have. - // Luckily, we never need to encode 0 significant bits, since that would put us in the other case (vdelta == 0). - // So instead we write out a 0 and adjust it back to 64 on unpacking. - sigbits := 64 - leading - trailing - a.b.writeBits(uint64(sigbits), 6) - a.b.writeBits(vDelta>>trailing, int(sigbits)) - } -} - -type xorIterator struct { - br bstream - numTotal uint16 - numRead uint16 - - t int64 - val float64 - - leading uint8 - trailing uint8 - - tDelta uint64 - err error -} - -func (it *xorIterator) At() (int64, float64) { - return it.t, it.val -} - -func (it *xorIterator) Err() error { - return it.err -} - -func (it *xorIterator) Next() bool { - if it.err != nil || it.numRead == it.numTotal { - return false - } - - if it.numRead == 0 { - t, err := binary.ReadVarint(&it.br) - if err != nil { - it.err = err - return false - } - v, err := it.br.readBits(64) - if err != nil { - it.err = err - return false - } - it.t = t - it.val = math.Float64frombits(v) - - it.numRead++ - return true - } - if it.numRead == 1 { - tDelta, err := binary.ReadUvarint(&it.br) - if err != nil { - it.err = err - return false - } - it.tDelta = tDelta - it.t = it.t + int64(it.tDelta) - - return it.readValue() - } - - var d byte - // read delta-of-delta - for i := 0; i < 4; i++ { - d <<= 1 - bit, err := it.br.readBit() - if err != nil { - it.err = err - return false - } - if bit == zero { - break - } - d |= 1 - } - var sz uint8 - var dod int64 - switch d { - case 0x00: - // dod == 0 - case 0x02: - sz = 14 - case 0x06: - sz = 17 - case 0x0e: - sz = 20 - case 0x0f: - bits, err := it.br.readBits(64) - if err != nil { - it.err = err - return false - } - - dod = int64(bits) - } - - if sz != 0 { - bits, err := it.br.readBits(int(sz)) - if err != nil { - it.err = err - return false - } - if bits > (1 << (sz - 1)) { - // or something - bits = bits - (1 << sz) - } - dod = int64(bits) - } - - it.tDelta = uint64(int64(it.tDelta) + dod) - it.t = it.t + int64(it.tDelta) - - return it.readValue() -} - -func (it *xorIterator) readValue() bool { - bit, err := it.br.readBit() - if err != nil { - it.err = err - return false - } - - if bit == zero { - // it.val = it.val - } else { - bit, err := it.br.readBit() - if err != nil { - it.err = err - return false - } - if bit == zero { - // reuse leading/trailing zero bits - // it.leading, it.trailing = it.leading, it.trailing - } else { - bits, err := it.br.readBits(5) - if err != nil { - it.err = err - return false - } - it.leading = uint8(bits) - - bits, err = it.br.readBits(6) - if err != nil { - it.err = err - return false - } - mbits := uint8(bits) - // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder - if mbits == 0 { - mbits = 64 - } - it.trailing = 64 - it.leading - mbits - } - - mbits := int(64 - it.leading - it.trailing) - bits, err := it.br.readBits(mbits) - if err != nil { - it.err = err - return false - } - vbits := math.Float64bits(it.val) - vbits ^= (bits << it.trailing) - it.val = math.Float64frombits(vbits) - } - - it.numRead++ - return true -} diff --git 
a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index aa1c2b95cddb8..e0364e9e7f641 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -113,6 +113,17 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { return Error(t, err, append([]interface{}{msg}, args...)...) } +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) +} + // Exactlyf asserts that two objects are equal in value and type. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) @@ -157,6 +168,31 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool return FileExists(t, path, append([]interface{}{msg}, args...)...) } +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Greater(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // @@ -289,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // @@ -300,6 +344,31 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf return Len(t, object, length, append([]interface{}{msg}, args...)...) 
} +// Lessf asserts that the first element is less than the second +// +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Less(t, e1, e2, append([]interface{}{msg}, args...)...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) +} + // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") @@ -444,6 +513,19 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in return Regexp(t, rx, str, append([]interface{}{msg}, args...)...) } +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Same(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Subsetf asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). // diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index de39f794e7216..26830403a9b35 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -215,6 +215,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { return Errorf(a.t, err, msg, args...) } +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. 
// // a.Exactly(int32(123), int64(123)) @@ -303,6 +325,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) b return FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -567,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. return JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -589,6 +677,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in return Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Less(a.t, e1, e2, msgAndArgs...) 
+} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -877,6 +1015,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . return Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
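The Greater/GreaterOrEqual/Less/LessOrEqual and Same forwarders added above are new in this testify bump. A short usage sketch (test and package names are hypothetical), mirroring the calls shown in the doc comments:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderedAndSame(t *testing.T) {
	a := assert.New(t)

	// Ordered comparisons work on numbers and strings of the same kind.
	a.Greater(2, 1)
	a.GreaterOrEqual(2, 2)
	a.Less("a", "b")
	a.LessOrEqual(1.5, 1.5)

	// Same asserts pointer identity, not value equality.
	x, y := 42, 42
	a.Same(&x, &x)
	_ = y // a.Same(&x, &y) would fail: equal values but different pointers
}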
// diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go new file mode 100644 index 0000000000000..15a486ca6e246 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -0,0 +1,309 @@ +package assert + +import ( + "fmt" + "reflect" +) + +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { + switch kind { + case reflect.Int: + { + intobj1 := obj1.(int) + intobj2 := obj2.(int) + if intobj1 > intobj2 { + return -1, true + } + if intobj1 == intobj2 { + return 0, true + } + if intobj1 < intobj2 { + return 1, true + } + } + case reflect.Int8: + { + int8obj1 := obj1.(int8) + int8obj2 := obj2.(int8) + if int8obj1 > int8obj2 { + return -1, true + } + if int8obj1 == int8obj2 { + return 0, true + } + if int8obj1 < int8obj2 { + return 1, true + } + } + case reflect.Int16: + { + int16obj1 := obj1.(int16) + int16obj2 := obj2.(int16) + if int16obj1 > int16obj2 { + return -1, true + } + if int16obj1 == int16obj2 { + return 0, true + } + if int16obj1 < int16obj2 { + return 1, true + } + } + case reflect.Int32: + { + int32obj1 := obj1.(int32) + int32obj2 := obj2.(int32) + if int32obj1 > int32obj2 { + return -1, true + } + if int32obj1 == int32obj2 { + return 0, true + } + if int32obj1 < int32obj2 { + return 1, true + } + } + case reflect.Int64: + { + int64obj1 := obj1.(int64) + int64obj2 := obj2.(int64) + if int64obj1 > int64obj2 { + return -1, true + } + if int64obj1 == int64obj2 { + return 0, true + } + if int64obj1 < int64obj2 { + return 1, true + } + } + case reflect.Uint: + { + uintobj1 := obj1.(uint) + uintobj2 := obj2.(uint) + if uintobj1 > uintobj2 { + return -1, true + } + if uintobj1 == uintobj2 { + return 0, true + } + if uintobj1 < uintobj2 { + return 1, true + } + } + case reflect.Uint8: + { + uint8obj1 := obj1.(uint8) + uint8obj2 := obj2.(uint8) + if uint8obj1 > uint8obj2 { + return -1, true + } + if uint8obj1 == uint8obj2 { + return 0, true + } + if uint8obj1 < uint8obj2 { + return 1, true + } + } + case reflect.Uint16: + { + uint16obj1 := obj1.(uint16) + uint16obj2 := obj2.(uint16) + if uint16obj1 > uint16obj2 { + return -1, true + } + if uint16obj1 == uint16obj2 { + return 0, true + } + if uint16obj1 < uint16obj2 { + return 1, true + } + } + case reflect.Uint32: + { + uint32obj1 := obj1.(uint32) + uint32obj2 := obj2.(uint32) + if uint32obj1 > uint32obj2 { + return -1, true + } + if uint32obj1 == uint32obj2 { + return 0, true + } + if uint32obj1 < uint32obj2 { + return 1, true + } + } + case reflect.Uint64: + { + uint64obj1 := obj1.(uint64) + uint64obj2 := obj2.(uint64) + if uint64obj1 > uint64obj2 { + return -1, true + } + if uint64obj1 == uint64obj2 { + return 0, true + } + if uint64obj1 < uint64obj2 { + return 1, true + } + } + case reflect.Float32: + { + float32obj1 := obj1.(float32) + float32obj2 := obj2.(float32) + if float32obj1 > float32obj2 { + return -1, true + } + if float32obj1 == float32obj2 { + return 0, true + } + if float32obj1 < float32obj2 { + return 1, true + } + } + case reflect.Float64: + { + float64obj1 := obj1.(float64) + float64obj2 := obj2.(float64) + if float64obj1 > float64obj2 { + return -1, true + } + if float64obj1 == float64obj2 { + return 0, true + } + if float64obj1 < float64obj2 { + return 1, true + } + } + case reflect.String: + { + stringobj1 := obj1.(string) + stringobj2 := obj2.(string) + if stringobj1 > stringobj2 { + return -1, true + } + if stringobj1 == stringobj2 { + return 0, true + } + if stringobj1 < stringobj2 { + 
return 1, true + } + } + } + + return 0, false +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// Less asserts that the first element is less than the second +// +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) + } + + return true +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) 
+ } + + return true +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 9bd4a80e4847c..044da8b01f226 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -18,6 +18,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" + yaml "gopkg.in/yaml.v2" ) //go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl @@ -350,6 +351,37 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) + if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { + return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) + } + + expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) + if expectedType != actualType { + return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", + expectedType, actualType), msgAndArgs...) + } + + if expected != actual { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + } + + return true +} + // formatUnequalValues takes two values of arbitrary types and returns string // representations appropriate to be presented to the user. 
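Two details of the new ordering assertions above are easy to miss: both arguments must be exactly the same kind (an int and an int64 fail with "Elements should be the same type" even when the values compare as expected), and the internal compare helper returns -1 when the first argument is greater and 1 when it is less, the reverse of the usual convention, which is why Greater checks res != -1 and Less checks res != 1. A hypothetical test illustrating the kind check:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderedKindsMustMatch(t *testing.T) {
	assert.Greater(t, int64(2), int64(1)) // passes: both arguments are int64

	// The call below would fail with "Elements should be the same type",
	// even though 2 > 1, because int64 and int are different kinds:
	//
	//   assert.Greater(t, int64(2), 1)
}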
// @@ -479,14 +511,14 @@ func isEmpty(object interface{}) bool { // collection types are empty when they have no element case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // pointers are empty if nil or if the value they point to is empty case reflect.Ptr: if objValue.IsNil() { return true } deref := objValue.Elem().Interface() return isEmpty(deref) - // for all other types, compare against the zero value + // for all other types, compare against the zero value default: zero := reflect.Zero(objValue.Type()) return reflect.DeepEqual(object, zero.Interface()) @@ -629,7 +661,7 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ func includeElement(list interface{}, element interface{}) (ok, found bool) { listValue := reflect.ValueOf(list) - elementValue := reflect.ValueOf(element) + listKind := reflect.TypeOf(list).Kind() defer func() { if e := recover(); e != nil { ok = false @@ -637,11 +669,12 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) { } }() - if reflect.TypeOf(list).Kind() == reflect.String { + if listKind == reflect.String { + elementValue := reflect.ValueOf(element) return true, strings.Contains(listValue.String(), elementValue.String()) } - if reflect.TypeOf(list).Kind() == reflect.Map { + if listKind == reflect.Map { mapKeys := listValue.MapKeys() for i := 0; i < len(mapKeys); i++ { if ObjectsAreEqual(mapKeys[i].Interface(), element) { @@ -1337,6 +1370,24 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + var expectedYAMLAsInterface, actualYAMLAsInterface interface{} + + if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) +} + func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { t := reflect.TypeOf(v) k := t.Kind() @@ -1371,8 +1422,8 @@ func diff(expected interface{}, actual interface{}) string { e = spewConfig.Sdump(expected) a = spewConfig.Sdump(actual) } else { - e = expected.(string) - a = actual.(string) + e = reflect.ValueOf(expected).String() + a = reflect.ValueOf(actual).String() } diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ @@ -1414,3 +1465,34 @@ var spewConfig = spew.ConfigState{ type tHelper interface { Helper() } + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. 
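Two more additions appear in this hunk. YAMLEq (the reason assertions.go now imports gopkg.in/yaml.v2) unmarshals both strings and compares the resulting values, so key order and quoting differences do not matter. Eventually, whose doc comment begins here, polls the condition every tick until it returns true or waitFor elapses. A hypothetical usage sketch for both:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestYAMLEqAndEventually(t *testing.T) {
	// YAMLEq: both documents unmarshal to the same map, so this passes
	// despite different key order and quoting.
	assert.YAMLEq(t, "hello: world\ncount: 3\n", "count: 3\nhello: \"world\"\n")

	// Eventually: check every 10ms, give up after one second.
	var done int32
	go func() {
		time.Sleep(50 * time.Millisecond)
		atomic.StoreInt32(&done, 1)
	}()
	assert.Eventually(t, func() bool {
		return atomic.LoadInt32(&done) == 1
	}, time.Second, 10*time.Millisecond)
}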
+// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + + timer := time.NewTimer(waitFor) + ticker := time.NewTicker(tick) + checkPassed := make(chan bool) + defer timer.Stop() + defer ticker.Stop() + defer close(checkPassed) + for { + select { + case <-timer.C: + return Fail(t, "Condition never satisfied", msgAndArgs...) + case result := <-checkPassed: + if result { + return true + } + case <-ticker.C: + go func() { + checkPassed <- condition() + }() + } + } +} diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index d6694ed78a101..b5288af5b7c30 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -262,17 +262,21 @@ func (m *Mock) On(methodName string, arguments ...interface{}) *Call { // */ func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) { - for i, call := range m.ExpectedCalls { - if call.Method == method && call.Repeatability > -1 { + var expectedCall *Call + for i, call := range m.ExpectedCalls { + if call.Method == method { _, diffCount := call.Arguments.Diff(arguments) if diffCount == 0 { - return i, call + expectedCall = call + if call.Repeatability > -1 { + return i, call + } } - } } - return -1, nil + + return -1, expectedCall } func (m *Mock) findClosestCall(method string, arguments ...interface{}) (*Call, string) { @@ -344,13 +348,17 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { + // expected call found but it has already been called with repeatable times + if call != nil { + m.mutex.Unlock() + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + } // we have to fail here - because we don't know what to do // as the return arguments. This is because: // // a) this is a totally unexpected call to this method, // b) the arguments are not what was expected, or // c) the developer has forgotten to add an accompanying On...Return pair. - closestCall, mismatch := m.findClosestCall(methodName, arguments...) m.mutex.Unlock() diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 535f293490ce9..c5903f5dbb80e 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -14,23 +14,23 @@ import ( // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if assert.Condition(t, comp, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Condition(t, comp, msgAndArgs...) { + return + } t.FailNow() } // Conditionf uses a Comparison to assert a complex condition. func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interface{}) { - if assert.Conditionf(t, comp, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Conditionf(t, comp, msg, args...) 
{ + return + } t.FailNow() } @@ -41,12 +41,12 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // assert.Contains(t, ["Hello", "World"], "World") // assert.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.Contains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Contains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -57,34 +57,34 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") // assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.Containsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Containsf(t, s, contains, msg, args...) { + return + } t.FailNow() } // DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.DirExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { - if assert.DirExistsf(t, path, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.DirExistsf(t, path, msg, args...) { + return + } t.FailNow() } @@ -94,12 +94,12 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // // assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { - if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatch(t, listA, listB, msgAndArgs...) { + return + } t.FailNow() } @@ -109,12 +109,12 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // // assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { - if assert.ElementsMatchf(t, listA, listB, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.ElementsMatchf(t, listA, listB, msg, args...) { + return + } t.FailNow() } @@ -123,12 +123,12 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Empty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Empty(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -137,12 +137,12 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Emptyf(t, object, msg, args...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Emptyf(t, object, msg, args...) { + return + } t.FailNow() } @@ -154,12 +154,12 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Equal(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equal(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -169,12 +169,12 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // actualObj, err := SomeFunction() // assert.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if assert.EqualError(t, theError, errString, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualError(t, theError, errString, msgAndArgs...) { + return + } t.FailNow() } @@ -184,12 +184,12 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // actualObj, err := SomeFunction() // assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { - if assert.EqualErrorf(t, theError, errString, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualErrorf(t, theError, errString, msg, args...) { + return + } t.FailNow() } @@ -198,12 +198,12 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.EqualValues(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValues(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -212,12 +212,12 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // // assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.EqualValuesf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.EqualValuesf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -229,12 +229,12 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // referenced values (as opposed to the memory addresses). Function equality // cannot be determined and will always fail. func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Equalf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Equalf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -245,12 +245,12 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.Error(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Error(t, err, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -261,9 +261,37 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Errorf(t, err, msg, args...) { return } + t.FailNow() +} + +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) { + return + } + if h, ok := t.(tHelper); ok { + h.Helper() + } + t.FailNow() +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { + return + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -274,12 +302,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // // assert.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.Exactly(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactly(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -287,56 +315,56 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // // assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.Exactlyf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Exactlyf(t, expected, actual, msg, args...) { + return + } t.FailNow() } // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.Fail(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Fail(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNow fails test func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - if assert.FailNow(t, failureMessage, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNow(t, failureMessage, msgAndArgs...) { + return + } t.FailNow() } // FailNowf fails test func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.FailNowf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FailNowf(t, failureMessage, msg, args...) { + return + } t.FailNow() } // Failf reports a failure through func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { - if assert.Failf(t, failureMessage, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Failf(t, failureMessage, msg, args...) 
{ + return + } t.FailNow() } @@ -344,12 +372,12 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.False(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.False(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -357,34 +385,96 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Falsef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Falsef(t, value, msg, args...) { + return + } t.FailNow() } // FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { - if assert.FileExists(t, path, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.FileExists(t, path, msgAndArgs...) { + return + } t.FailNow() } // FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.FileExistsf(t, path, msg, args...) { return } + t.FailNow() +} + +// Greater asserts that the first element is greater than the second +// +// assert.Greater(t, 2, 1) +// assert.Greater(t, float64(2), float64(1)) +// assert.Greater(t, "b", "a") +func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Greater(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqual(t, 2, 1) +// assert.GreaterOrEqual(t, 2, 2) +// assert.GreaterOrEqual(t, "b", "a") +// assert.GreaterOrEqual(t, "b", "b") +func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqual(t, e1, e2, msgAndArgs...) { + return + } + t.FailNow() +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.GreaterOrEqualf(t, e1, e2, msg, args...) { + return + } + t.FailNow() +} + +// Greaterf asserts that the first element is greater than the second +// +// assert.Greaterf(t, 2, 1, "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) +// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Greaterf(t, e1, e2, msg, args...) 
{ + return + } t.FailNow() } @@ -395,12 +485,12 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -411,12 +501,12 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -427,12 +517,12 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { - if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContains(t, handler, method, url, values, str, msgAndArgs...) { + return + } t.FailNow() } @@ -443,12 +533,12 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { - if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPBodyNotContainsf(t, handler, method, url, values, str, msg, args...) { + return + } t.FailNow() } @@ -458,12 +548,12 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // Returns whether the assertion was successful (true) or not (false). func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPError(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -473,12 +563,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPErrorf(t, handler, method, url, values, msg, args...) 
{ + return + } t.FailNow() } @@ -488,12 +578,12 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirect(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -503,12 +593,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPRedirectf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -518,12 +608,12 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { - if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccess(t, handler, method, url, values, msgAndArgs...) { + return + } t.FailNow() } @@ -533,12 +623,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { - if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.HTTPSuccessf(t, handler, method, url, values, msg, args...) { + return + } t.FailNow() } @@ -546,12 +636,12 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.Implements(t, interfaceObject, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implements(t, interfaceObject, object, msgAndArgs...) { + return + } t.FailNow() } @@ -559,12 +649,12 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // // assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { - if assert.Implementsf(t, interfaceObject, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Implementsf(t, interfaceObject, object, msg, args...) 
{ + return + } t.FailNow() } @@ -572,56 +662,56 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // // assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValues(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValues(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaMapValuesf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // InDeltaSlice is the same as InDelta, except it compares two slices. func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } // InDeltaSlicef is the same as InDelta, except it compares two slices. func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaSlicef(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } @@ -629,132 +719,216 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // // assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { - if assert.InDeltaf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InDeltaf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // InEpsilon asserts that expected and actual have a relative error less than epsilon func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlice(t, expected, actual, epsilon, msgAndArgs...) { + return + } t.FailNow() } // InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices. func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonSlicef(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // InEpsilonf asserts that expected and actual have a relative error less than epsilon func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) { - if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.InEpsilonf(t, expected, actual, epsilon, msg, args...) { + return + } t.FailNow() } // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if assert.IsType(t, expectedType, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.IsType(t, expectedType, object, msgAndArgs...) { + return + } t.FailNow() } // IsTypef asserts that the specified objects are of the same type. func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.IsTypef(t, expectedType, object, msg, args...) { return } + t.FailNow() +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// JSONEqf asserts that two JSON strings are equivalent. +// +// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.JSONEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3) +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Len(t, object, length, msgAndArgs...) { + return + } + t.FailNow() +} + +// Lenf asserts that the specified object has specific length. 
+// Lenf also fails if the object has a type that len() not accept. +// +// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lenf(t, object, length, msg, args...) { + return + } t.FailNow() } -// JSONEq asserts that two JSON strings are equivalent. +// Less asserts that the first element is less than the second // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if assert.JSONEq(t, expected, actual, msgAndArgs...) { - return - } +// assert.Less(t, 1, 2) +// assert.Less(t, float64(1), float64(2)) +// assert.Less(t, "a", "b") +func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Less(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// JSONEqf asserts that two JSON strings are equivalent. +// LessOrEqual asserts that the first element is less than or equal to the second // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") -func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { - if assert.JSONEqf(t, expected, actual, msg, args...) { - return - } +// assert.LessOrEqual(t, 1, 2) +// assert.LessOrEqual(t, 2, 2) +// assert.LessOrEqual(t, "a", "b") +// assert.LessOrEqual(t, "b", "b") +func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqual(t, e1, e2, msgAndArgs...) { + return + } t.FailNow() } -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. +// LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if assert.Len(t, object, length, msgAndArgs...) { - return - } +// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.LessOrEqualf(t, e1, e2, msg, args...) { + return + } t.FailNow() } -// Lenf asserts that the specified object has specific length. -// Lenf also fails if the object has a type that len() not accept. +// Lessf asserts that the first element is less than the second // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") -func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { - if assert.Lenf(t, object, length, msg, args...) { - return - } +// assert.Lessf(t, 1, 2, "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) +// assert.Lessf(t, "a", "b", "error message %s", "formatted") +func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Lessf(t, e1, e2, msg, args...) 
{ + return + } t.FailNow() } @@ -762,12 +936,12 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.Nil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nil(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -775,12 +949,12 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.Nilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Nilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -791,12 +965,12 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if assert.NoError(t, err, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoError(t, err, msgAndArgs...) { + return + } t.FailNow() } @@ -807,12 +981,12 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // assert.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { - if assert.NoErrorf(t, err, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NoErrorf(t, err, msg, args...) { + return + } t.FailNow() } @@ -823,12 +997,12 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { // assert.NotContains(t, ["Hello", "World"], "Earth") // assert.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { - if assert.NotContains(t, s, contains, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContains(t, s, contains, msgAndArgs...) { + return + } t.FailNow() } @@ -839,12 +1013,12 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") // assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { - if assert.NotContainsf(t, s, contains, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotContainsf(t, s, contains, msg, args...) { + return + } t.FailNow() } @@ -855,12 +1029,12 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotEmpty(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmpty(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -871,12 +1045,12 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // assert.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotEmptyf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEmptyf(t, object, msg, args...) 
{ + return + } t.FailNow() } @@ -887,12 +1061,12 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if assert.NotEqual(t, expected, actual, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqual(t, expected, actual, msgAndArgs...) { + return + } t.FailNow() } @@ -903,12 +1077,12 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if assert.NotEqualf(t, expected, actual, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotEqualf(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -916,12 +1090,12 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if assert.NotNil(t, object, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNil(t, object, msgAndArgs...) { + return + } t.FailNow() } @@ -929,12 +1103,12 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // // assert.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { - if assert.NotNilf(t, object, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotNilf(t, object, msg, args...) { + return + } t.FailNow() } @@ -942,12 +1116,12 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // // assert.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.NotPanics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -955,12 +1129,12 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.NotPanicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotPanicsf(t, f, msg, args...) { + return + } t.FailNow() } @@ -969,12 +1143,12 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") // assert.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.NotRegexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexp(t, rx, str, msgAndArgs...) 
{ + return + } t.FailNow() } @@ -983,12 +1157,12 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { - if assert.NotRegexpf(t, rx, str, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotRegexpf(t, rx, str, msg, args...) { + return + } t.FailNow() } @@ -997,12 +1171,12 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // // assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.NotSubset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubset(t, list, subset, msgAndArgs...) { + return + } t.FailNow() } @@ -1011,34 +1185,34 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.NotSubsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotSubsetf(t, list, subset, msg, args...) { + return + } t.FailNow() } // NotZero asserts that i is not the zero value for its type. func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.NotZero(t, i, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZero(t, i, msgAndArgs...) { + return + } t.FailNow() } // NotZerof asserts that i is not the zero value for its type. func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.NotZerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.NotZerof(t, i, msg, args...) { + return + } t.FailNow() } @@ -1046,12 +1220,12 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // // assert.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.Panics(t, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panics(t, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1060,12 +1234,12 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // // assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValue(t, expected, f, msgAndArgs...) { + return + } t.FailNow() } @@ -1074,12 +1248,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // // assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.PanicsWithValuef(t, expected, f, msg, args...) 
{ - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.PanicsWithValuef(t, expected, f, msg, args...) { + return + } t.FailNow() } @@ -1087,12 +1261,12 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // // assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { - if assert.Panicsf(t, f, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Panicsf(t, f, msg, args...) { + return + } t.FailNow() } @@ -1101,12 +1275,12 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // assert.Regexp(t, regexp.MustCompile("start"), "it's starting") // assert.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if assert.Regexp(t, rx, str, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Regexp(t, rx, str, msgAndArgs...) { + return + } t.FailNow() } @@ -1115,12 +1289,44 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } if assert.Regexpf(t, rx, str, msg, args...) { return } + t.FailNow() +} + +// Same asserts that two pointers reference the same object. +// +// assert.Same(t, ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.Same(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// Samef asserts that two pointers reference the same object. +// +// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Samef(t, expected, actual, msg, args...) { + return + } t.FailNow() } @@ -1129,12 +1335,12 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // // assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { - if assert.Subset(t, list, subset, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subset(t, list, subset, msgAndArgs...) { + return + } t.FailNow() } @@ -1143,12 +1349,12 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // // assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { - if assert.Subsetf(t, list, subset, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Subsetf(t, list, subset, msg, args...) 
{ + return + } t.FailNow() } @@ -1156,12 +1362,12 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if assert.True(t, value, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.True(t, value, msgAndArgs...) { + return + } t.FailNow() } @@ -1169,12 +1375,12 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // // assert.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { - if assert.Truef(t, value, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Truef(t, value, msg, args...) { + return + } t.FailNow() } @@ -1182,12 +1388,12 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // // assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + return + } t.FailNow() } @@ -1195,33 +1401,33 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // // assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { - if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.WithinDurationf(t, expected, actual, delta, msg, args...) { + return + } t.FailNow() } // Zero asserts that i is the zero value for its type. func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if assert.Zero(t, i, msgAndArgs...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zero(t, i, msgAndArgs...) { + return + } t.FailNow() } // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) { - if assert.Zerof(t, i, msg, args...) { - return - } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.Zerof(t, i, msg, args...) { + return + } t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 6ffc751b5e507..55e42ddebdc45 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,6 +1,6 @@ {{.Comment}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { - if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } if h, ok := t.(tHelper); ok { h.Helper() } + if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } t.FailNow() } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 9fe41dbdc0cb1..804fae035bcce 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -216,6 +216,28 @@ func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { Errorf(a.t, err, msg, args...) 
} +// Eventually asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond) +func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventually(a.t, condition, waitFor, tick, msgAndArgs...) +} + +// Eventuallyf asserts that given condition will be met in waitFor time, +// periodically checking target function each tick. +// +// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Eventuallyf(a.t, condition, waitFor, tick, msg, args...) +} + // Exactly asserts that two objects are equal in value and type. // // a.Exactly(int32(123), int64(123)) @@ -304,6 +326,56 @@ func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { FileExistsf(a.t, path, msg, args...) } +// Greater asserts that the first element is greater than the second +// +// a.Greater(2, 1) +// a.Greater(float64(2), float64(1)) +// a.Greater("b", "a") +func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greater(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqual asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqual(2, 1) +// a.GreaterOrEqual(2, 2) +// a.GreaterOrEqual("b", "a") +// a.GreaterOrEqual("b", "b") +func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// GreaterOrEqualf asserts that the first element is greater than or equal to the second +// +// a.GreaterOrEqualf(2, 1, "error message %s", "formatted") +// a.GreaterOrEqualf(2, 2, "error message %s", "formatted") +// a.GreaterOrEqualf("b", "a", "error message %s", "formatted") +// a.GreaterOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + GreaterOrEqualf(a.t, e1, e2, msg, args...) +} + +// Greaterf asserts that the first element is greater than the second +// +// a.Greaterf(2, 1, "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) +// a.Greaterf("b", "a", "error message %s", "formatted") +func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Greaterf(a.t, e1, e2, msg, args...) +} + // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // @@ -568,6 +640,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. 
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -590,6 +678,56 @@ func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...in Lenf(a.t, object, length, msg, args...) } +// Less asserts that the first element is less than the second +// +// a.Less(1, 2) +// a.Less(float64(1), float64(2)) +// a.Less("a", "b") +func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Less(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqual asserts that the first element is less than or equal to the second +// +// a.LessOrEqual(1, 2) +// a.LessOrEqual(2, 2) +// a.LessOrEqual("a", "b") +// a.LessOrEqual("b", "b") +func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqual(a.t, e1, e2, msgAndArgs...) +} + +// LessOrEqualf asserts that the first element is less than or equal to the second +// +// a.LessOrEqualf(1, 2, "error message %s", "formatted") +// a.LessOrEqualf(2, 2, "error message %s", "formatted") +// a.LessOrEqualf("a", "b", "error message %s", "formatted") +// a.LessOrEqualf("b", "b", "error message %s", "formatted") +func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + LessOrEqualf(a.t, e1, e2, msg, args...) +} + +// Lessf asserts that the first element is less than the second +// +// a.Lessf(1, 2, "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) +// a.Lessf("a", "b", "error message %s", "formatted") +func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Lessf(a.t, e1, e2, msg, args...) +} + // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -878,6 +1016,32 @@ func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args . Regexpf(a.t, rx, str, msg, args...) } +// Same asserts that two pointers reference the same object. +// +// a.Same(ptr1, ptr2) +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Same(a.t, expected, actual, msgAndArgs...) +} + +// Samef asserts that two pointers reference the same object. +// +// a.Samef(ptr1, ptr2, "error message %s", "formatted") +// +// Both arguments must be pointer variables. Pointer variable sameness is +// determined based on the equality of both type and value. +func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + Samef(a.t, expected, actual, msg, args...) +} + // Subset asserts that the specified list(array, slice...) contains all // elements given in the specified subset(array, slice...). 
// diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 61b49647b9aed..7dfe201a36ee7 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -644,6 +644,8 @@ func (tml *Tokenmandatorylabel) Size() uint32 { //sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW //sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW // An access token contains the security information for a logon session. // The system creates an access token when a user logs on, and every @@ -785,8 +787,8 @@ func (token Token) GetLinkedToken() (Token, error) { return linkedToken, nil } -// GetSystemDirectory retrieves path to current location of the system -// directory, which is typically, though not always, C:\Windows\System32. +// GetSystemDirectory retrieves the path to current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`. func GetSystemDirectory() (string, error) { n := uint32(MAX_PATH) for { @@ -802,6 +804,42 @@ func GetSystemDirectory() (string, error) { } } +// GetWindowsDirectory retrieves the path to current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server. +func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + // IsMember reports whether the access token t is a member of the provided SID. 
func (t Token) IsMember(sid *SID) (bool, error) { var b int32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index b23050924f583..33e7d45ab1c24 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -269,6 +269,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -296,6 +297,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree //sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers // syscall interface implementation for other packages @@ -1306,8 +1308,8 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil } -// RtlGetVersion returns the true version of the underlying operating system, ignoring -// any manifesting or compatibility layers on top of the win32 layer. +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics but is affected by the application compatibility layer. func RtlGetVersion() *OsVersionInfoEx { info := &OsVersionInfoEx{} info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) @@ -1318,3 +1320,11 @@ func RtlGetVersion() *OsVersionInfoEx { _ = rtlGetVersion(info) return info } + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. 
+func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index d461bed98acc8..1c275cb935a20 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -209,6 +209,7 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetProcessId = modkernel32.NewProc("GetProcessId") procOpenThread = modkernel32.NewProc("OpenThread") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") @@ -234,6 +235,7 @@ var ( procCoCreateGuid = modole32.NewProc("CoCreateGuid") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") procWSAStartup = modws2_32.NewProc("WSAStartup") procWSACleanup = modws2_32.NewProc("WSACleanup") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -303,6 +305,8 @@ var ( procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") @@ -2255,6 +2259,24 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand return } +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2530,6 +2552,11 @@ func rtlGetVersion(info *OsVersionInfoEx) (ret error) { return } +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -3307,6 +3334,32 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { return } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len 
== 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func WTSQueryUserToken(session uint32, token *Token) (err error) { r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) if r1 == 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index ead95a7791ca1..21498a5d4aa5f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -38,7 +38,7 @@ github.com/alecthomas/template/parse github.com/alecthomas/units # github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 github.com/armon/go-metrics -# github.com/aws/aws-sdk-go v1.22.4 +# github.com/aws/aws-sdk-go v1.23.12 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil @@ -61,6 +61,7 @@ github.com/aws/aws-sdk-go/aws/signer/v4 github.com/aws/aws-sdk-go/internal/ini github.com/aws/aws-sdk-go/internal/s3err github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath github.com/aws/aws-sdk-go/internal/sdkrand github.com/aws/aws-sdk-go/internal/sdkuri github.com/aws/aws-sdk-go/internal/shareddefaults @@ -90,7 +91,7 @@ github.com/beorn7/perks/quantile github.com/blang/semver # github.com/bmatcuk/doublestar v1.1.1 github.com/bmatcuk/doublestar -# github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 +# github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache/memcache # github.com/census-instrumentation/opencensus-proto v0.2.1 github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1 @@ -112,7 +113,7 @@ github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog github.com/coreos/pkg/dlopen -# github.com/cortexproject/cortex v0.2.0 +# github.com/cortexproject/cortex v0.2.1-0.20191003165238-857bb8476e59 github.com/cortexproject/cortex/pkg/chunk github.com/cortexproject/cortex/pkg/chunk/aws github.com/cortexproject/cortex/pkg/chunk/cache @@ -243,6 +244,9 @@ github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers # github.com/golang/snappy v0.0.1 github.com/golang/snappy +# github.com/gomodule/redigo v2.0.0+incompatible +github.com/gomodule/redigo/internal +github.com/gomodule/redigo/redis # github.com/google/btree v1.0.0 github.com/google/btree # github.com/google/go-cmp v0.3.1 @@ -377,7 +381,7 @@ github.com/prometheus/client_golang/prometheus/promhttp github.com/prometheus/client_golang/prometheus/testutil # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.6.0 +# github.com/prometheus/common v0.7.0 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -386,7 +390,7 @@ github.com/prometheus/common/version # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs -# github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f +# github.com/prometheus/prometheus v1.8.2-0.20190918104050-8744afdd1ea0 
github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/azure github.com/prometheus/prometheus/discovery/config @@ -429,8 +433,6 @@ github.com/prometheus/prometheus/util/strutil github.com/prometheus/prometheus/util/teststorage github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/treecache -# github.com/prometheus/tsdb v0.9.1 -github.com/prometheus/tsdb/chunkenc # github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 github.com/samuel/go-zookeeper/zk # github.com/sercand/kuberesolver v2.1.0+incompatible @@ -449,7 +451,7 @@ github.com/soheilhy/cmux github.com/spf13/pflag # github.com/stretchr/objx v0.2.0 github.com/stretchr/objx -# github.com/stretchr/testify v1.3.0 +# github.com/stretchr/testify v1.4.0 github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require @@ -637,7 +639,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a +# golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2