diff --git a/Gopkg.lock b/Gopkg.lock index 9190c8313..e37cf5280 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,57 +2,72 @@ [[projects]] + digest = "1:9a7a1f8cd0991a7c3549700b57c4ea1c213dadf7eb94d69e59ad3600490f81bc" name = "cloud.google.com/go" packages = [ "compute/metadata", - "internal" + "internal", ] + pruneopts = "NUT" revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c" version = "v0.7.0" [[projects]] + digest = "1:253580c3e9e495538385a96f276a0e06f3ba90c73421df1258de8a8375ff4b09" name = "github.com/Azure/azure-sdk-for-go" packages = [ "arm/compute", - "arm/network" + "arm/network", ] + pruneopts = "NUT" revision = "bd73d950fa4440dae889bd9917bff7cef539f86e" [[projects]] + digest = "1:401dd46323a9f30c7cc9adef35f4961714caf74f61f8e8666f956bc158de9bba" name = "github.com/Azure/go-autorest" packages = [ "autorest", "autorest/azure", "autorest/date", "autorest/to", - "autorest/validation" + "autorest/validation", ] + pruneopts = "NUT" revision = "a2fdd780c9a50455cecd249b00bdc3eb73a78e31" [[projects]] + digest = "1:be3ccd9f881604e4dd6d15cccfa126aa309232f0ba075ae5f92d3ef729a62758" name = "github.com/BurntSushi/toml" packages = ["."] + pruneopts = "NUT" revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895" [[projects]] + digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c" name = "github.com/PuerkitoBio/purell" packages = ["."] + pruneopts = "NUT" revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" version = "v1.1.0" [[projects]] branch = "master" + digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" name = "github.com/PuerkitoBio/urlesc" packages = ["."] + pruneopts = "NUT" revision = "bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5" [[projects]] + digest = "1:44c7344434890241dde2d0d70d2e47355a1e012549e28ccf06f925aa6f8271ff" name = "github.com/Sirupsen/logrus" packages = ["."] + pruneopts = "NUT" revision = "ba1b36c82c5e05c4f912a88eab0dcd91a171688f" version = "v0.11.5" [[projects]] + digest = "1:e3c230a3e97de27d6b74712ae4745be2c5400f0a6c4b9e348f5800be26f30a76" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -79,85 +94,107 @@ "private/protocol/xml/xmlutil", "service/autoscaling", "service/ec2", - "service/sts" + "service/sts", ] + pruneopts = "NUT" revision = "f6ea558f30e0a983d529b32c741e4caed17c7df0" version = "v1.8.16" [[projects]] branch = "master" + digest = "1:bfe817c134f8681840c8d6c02606982ea29a19fd48086e89aaa8dff7c5d837dc" name = "github.com/benbjohnson/tmpl" packages = ["."] + pruneopts = "NUT" revision = "8e77bc5fc07968736bb74f4b40b4c577028a61b6" [[projects]] branch = "master" + digest = "1:cb0535f5823b47df7dcb9768ebb6c000b79ad115472910c70efe93c9ed9b2315" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "NUT" revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" [[projects]] + digest = "1:aba270497eb2d49f5cba6f4162d524b9a1195a24cbce8be20bf56a0051f47deb" name = "github.com/blang/semver" packages = ["."] + pruneopts = "NUT" revision = "b38d23b8782a487059e8fc8773e9a5b228a77cb6" version = "v3.5.0" [[projects]] + digest = "1:4c7a379d06f493d3cf4301a08667d9b83011a92b1d9defc7ec58a10ade19e796" name = "github.com/boltdb/bolt" packages = ["."] + pruneopts = "NUT" revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9" version = "v1.3.0" [[projects]] + digest = "1:60488563e453e4bbf9f5387fe94c9dd3d28372dc993c5f89104b8396c3593b21" name = "github.com/cenkalti/backoff" packages = ["."] + pruneopts = "NUT" revision = "32cd0c5b3aef12c76ed64aaf678f6c79736be7dc" version = "v1.0.0" [[projects]] branch = "master" + digest = 
"1:4c313de62fe9184560948eaa94714403a73d3026777a0da276550b7379cf6c80" name = "github.com/coreos/go-oidc" packages = [ "http", "jose", "key", "oauth2", - "oidc" + "oidc", ] + pruneopts = "NUT" revision = "be73733bb8cc830d0205609b95d125215f8e9c70" [[projects]] + digest = "1:e2c6ad2e212c0c9a8b13e5d1d52a6cd3a42411b636fcea10d286f7b6fc13fea0" name = "github.com/coreos/pkg" packages = [ "health", "httputil", - "timeutil" + "timeutil", ] + pruneopts = "NUT" revision = "3ac0863d7acf3bc44daf49afef8919af12f704ef" version = "v3" [[projects]] branch = "master" + digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" packages = ["spew"] + pruneopts = "NUT" revision = "346938d642f2ec3594ed81d874461961cd0faa76" [[projects]] + digest = "1:bfebf84ecf0c67e060f1e92cfcf3d4cbf4428a26a8b3cc258a64d7330f4de31b" name = "github.com/dgrijalva/jwt-go" packages = ["."] + pruneopts = "NUT" revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" version = "v3.0.0" [[projects]] + digest = "1:516dc8a4003a0ad8aaf2b07d41bafb3c9772557ee8c819b36c28536cc30d0ac2" name = "github.com/docker/distribution" packages = [ "digest", - "reference" + "reference", ] + pruneopts = "NUT" revision = "a25b9ef0c9fe242ac04bb20d3a028442b7d266b6" version = "v2.6.1" [[projects]] + digest = "1:740dc3cccfcdb302c323d17da5f4f2dfa65f7b8c666e0a9ac8bc64f560fb2974" name = "github.com/docker/docker" packages = [ "api/types", @@ -169,113 +206,145 @@ "api/types/registry", "api/types/strslice", "api/types/swarm", - "api/types/versions" + "api/types/versions", ] + pruneopts = "NUT" revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" version = "v1.13.1" [[projects]] + digest = "1:be0211ba62c6b44e36a747047b55d45ad23288920799edda7e8b59da852cbc2c" name = "github.com/docker/go-connections" packages = ["nat"] + pruneopts = "NUT" revision = "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" version = "v0.2.1" [[projects]] + digest = "1:cb9111a5f582f106b83203d29e92e28a02d1eba0e6ea20f61d67ae54b6a8dbfe" name = "github.com/docker/go-units" packages = ["."] + pruneopts = "NUT" revision = "f2d77a61e3c169b43402a0a1e84f06daf29b8190" version = "v0.3.1" [[projects]] branch = "master" + digest = "1:3f451047834b63dcc35c5dc54d707440606cfd5b53c730a5f03bebd04d8e6af5" name = "github.com/dustin/go-humanize" packages = ["."] + pruneopts = "NUT" revision = "259d2a102b871d17f30e3cd9881a642961a1e486" [[projects]] + digest = "1:492d2263bad08c906b12d9b7bb31213698d801dd01d033081139bd60d9009ca4" name = "github.com/eclipse/paho.mqtt.golang" packages = [ ".", - "packets" + "packets", ] + pruneopts = "NUT" revision = "45f9b18f4864c81d49c3ed01e5faec9eeb05de31" version = "v1.0.0" [[projects]] + digest = "1:11652d24c6b9574c5f4a94370c0e23cbb46aec620f13f0c4f4bdc4635c2989e8" name = "github.com/emicklei/go-restful" packages = [ ".", "log", - "swagger" + "swagger", ] + pruneopts = "NUT" revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594" version = "v1.2" [[projects]] branch = "master" + digest = "1:df767c6ddf21aad6abd0da659e246cad6843e78ced90fb019a1625a12e571465" name = "github.com/evanphx/json-patch" packages = ["."] + pruneopts = "NUT" revision = "30afec6a1650c11c861dc1fb58e100cd5219e490" [[projects]] branch = "master" + digest = "1:a546bd0e6ce4ebdcf79507110d9498f697e154e5624e5e84dd2ca2efc776ae32" name = "github.com/geoffgarside/ber" packages = ["."] + pruneopts = "NUT" revision = "854377f11dfb81f04121879829bc53487e377739" [[projects]] branch = "master" + digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" name = 
"github.com/ghodss/yaml" packages = ["."] + pruneopts = "NUT" revision = "04f313413ffd65ce25f2541bfd2b2ceec5c0908c" [[projects]] + digest = "1:27b47997dc05f1af7b96d113754bd86ab1e7983d544cfd3bc7f48ec3e31ea3f5" name = "github.com/go-ini/ini" packages = ["."] + pruneopts = "NUT" revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5" version = "v1.27.0" [[projects]] branch = "master" + digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" name = "github.com/go-openapi/jsonpointer" packages = ["."] + pruneopts = "NUT" revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" [[projects]] branch = "master" + digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" name = "github.com/go-openapi/jsonreference" packages = ["."] + pruneopts = "NUT" revision = "36d33bfe519efae5632669801b180bf1a245da3b" [[projects]] branch = "master" + digest = "1:ec09a363ecf22580db64d27f9d2c2604cea79e69bace2e50d66af15a492f82c1" name = "github.com/go-openapi/spec" packages = ["."] + pruneopts = "NUT" revision = "e51c28f07047ad90caff03f6450908720d337e0c" [[projects]] branch = "master" + digest = "1:dd9842008e1f630db0d091aa3774103b16bc972cf989492b71f734a03ebd6b5c" name = "github.com/go-openapi/swag" packages = ["."] + pruneopts = "NUT" revision = "24ebf76d720bab64f62824d76bced3184a65490d" [[projects]] + digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f" name = "github.com/gogo/protobuf" packages = [ "proto", - "sortkeys" + "sortkeys", ] + pruneopts = "NUT" revision = "100ba4e885062801d56799d78530b73b178a78f3" version = "v0.4" [[projects]] branch = "master" + digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" name = "github.com/golang/glog" packages = ["."] + pruneopts = "NUT" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" [[projects]] branch = "master" + digest = "1:065785c3265dc118dda15e31fb57e6ceface395a94b09cce8cd2c8fa8ce7b974" name = "github.com/golang/protobuf" packages = [ "proto", @@ -283,77 +352,99 @@ "protoc-gen-go/descriptor", "protoc-gen-go/generator", "protoc-gen-go/grpc", - "protoc-gen-go/plugin" + "protoc-gen-go/plugin", ] + pruneopts = "NUT" revision = "2bba0603135d7d7f5cb73b2125beeda19c09f4ef" [[projects]] branch = "master" + digest = "1:9413ddbde906f91f062fda0dfa9a7cff43458cd1b2282c0fa25c61d89300b116" name = "github.com/golang/snappy" packages = ["."] + pruneopts = "NUT" revision = "553a641470496b2327abcac10b36396bd98e45c9" [[projects]] + digest = "1:c32382738658c8f0e5c8e488967cc4cf1c795481ec8c62505b8976d2a8ad0c42" name = "github.com/google/go-cmp" packages = [ "cmp", "cmp/cmpopts", "cmp/internal/diff", "cmp/internal/function", - "cmp/internal/value" + "cmp/internal/value", ] + pruneopts = "NUT" revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97" version = "v0.1.0" [[projects]] branch = "master" + digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067" name = "github.com/google/gofuzz" packages = ["."] + pruneopts = "NUT" revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" [[projects]] branch = "master" + digest = "1:1ab18cf8c2084968d6dca0dd46fbda9efba08664ecd7957b63c7ca57bb2455df" name = "github.com/google/uuid" packages = ["."] + pruneopts = "NUT" revision = "6a5e28554805e78ea6141142aba763936c4761c0" [[projects]] branch = "master" + digest = "1:b394d36f2403ca6e55d6de105ad73522c58919d19e31b0505f05c5afaa302ca7" name = "github.com/googleapis/gax-go" packages = ["."] + pruneopts = "NUT" revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b" [[projects]] + digest = 
"1:412beefef71413b580631c12a681ba2acab1ffdad9f967c38ed37e19fd101631" name = "github.com/gorhill/cronexpr" packages = ["."] + pruneopts = "NUT" revision = "a557574d6c024ed6e36acc8b610f5f211c91568a" version = "1.0.0" [[projects]] + digest = "1:064c7f0ccdb4036791092fb93ec214a6f09119711801b9e587b6d1e76acc55de" name = "github.com/hashicorp/consul" packages = ["api"] + pruneopts = "NUT" revision = "e9ca44d0a1757ac9aecc6785904a701936c10e4a" version = "v0.8.1" [[projects]] branch = "master" + digest = "1:7b699584752575e81e3f4e8b00cfb3e5d6fa5419d5d212ef925e02c798847464" name = "github.com/hashicorp/go-cleanhttp" packages = ["."] + pruneopts = "NUT" revision = "3573b8b52aa7b37b9358d966a898feb387f62437" [[projects]] branch = "master" + digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423" name = "github.com/hashicorp/go-rootcerts" packages = ["."] + pruneopts = "NUT" revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" [[projects]] + digest = "1:0dd7b7b01769f9df356dc99f9e4144bdbabf6c79041ea7c0892379c5737f3c44" name = "github.com/hashicorp/serf" packages = ["coordinate"] + pruneopts = "NUT" revision = "d6574a5bb1226678d7010325fb6c985db20ee458" version = "v0.8.1" [[projects]] + digest = "1:4b32a332c32e1626280df4aceecc6b9eeaa8aa11e5083926ed41b89409ec40b6" name = "github.com/influxdata/influxdb" packages = [ ".", @@ -375,54 +466,70 @@ "toml", "tsdb", "tsdb/internal", - "uuid" + "uuid", ] + pruneopts = "NUT" revision = "e4628bb69266dbd624dc27d674b52705ce0dcbf2" version = "v1.1.4" [[projects]] branch = "master" + digest = "1:a6411d501f20aa4325c2cef806205a4b4802aec94b296f495db662c6ef46c787" name = "github.com/influxdata/usage-client" packages = ["v1"] + pruneopts = "NUT" revision = "6d3895376368aa52a3a81d2a16e90f0f52371967" [[projects]] branch = "master" + digest = "1:61187cdf67a554f26a184342401d2d7f40cb7064a77de4b52b099a4bc8368290" name = "github.com/influxdata/wlog" packages = ["."] + pruneopts = "NUT" revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" [[projects]] + digest = "1:7c818eb119c69fc685573449e8f799596ba81827074b006ff0cb71052424f254" name = "github.com/jmespath/go-jmespath" packages = ["."] + pruneopts = "NUT" revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9" version = "0.2.2" [[projects]] + digest = "1:6b1eae4bb93e5ccd23cb09d1e005ecb391316d27701b7a5264f8555a6e2f3d87" name = "github.com/jonboulle/clockwork" packages = ["."] + pruneopts = "NUT" revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" version = "v0.1.0" [[projects]] branch = "master" + digest = "1:57719b5d47a6adf438ed209107c10d0da22e993916b326df3a41fcb6c9bad533" name = "github.com/juju/ratelimit" packages = ["."] + pruneopts = "NUT" revision = "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72" [[projects]] + digest = "1:3a32e9d6c50f433ce5a8a65106c882ec0734a39863bdde6efa1f33c4d3e66acf" name = "github.com/k-sone/snmpgo" packages = ["."] + pruneopts = "NUT" revision = "de09377ff34857b08afdc16ea8c7c2929eb1fc6e" version = "v3.2.0" [[projects]] + digest = "1:805127e5bf73d46bf81aeb8eab8a28697880045761fa28d483308247c2898290" name = "github.com/kimor79/gollectd" packages = ["."] + pruneopts = "NUT" revision = "b5dddb1667dcc1e6355b9305e2c1608a2db6983c" version = "v1.0.0" [[projects]] + digest = "1:9ab29968625f38dd39ab1dbd50797549313de7881782a32be6f2ca664c99dd08" name = "github.com/mailru/easyjson" packages = [ ".", @@ -432,105 +539,135 @@ "gen", "jlexer", "jwriter", - "parser" + "parser", ] + pruneopts = "NUT" revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" [[projects]] + digest = 
"1:cb591533458f6eb6e2c1065ff3eac6b50263d7847deb23fc9f79b25bc608970e" name = "github.com/mattn/go-runewidth" packages = ["."] + pruneopts = "NUT" revision = "9e777a8366cce605130a531d2cd6363d07ad7317" version = "v0.0.2" [[projects]] + digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] + pruneopts = "NUT" revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" version = "v1.0.0" [[projects]] branch = "master" + digest = "1:bdff96d801b156a076211f2218e9a3d6ce942e087a8a5fb345b7a088bd65473f" name = "github.com/miekg/dns" packages = ["."] + pruneopts = "NUT" revision = "6ebcb714d36901126ee2807031543b38c56de963" [[projects]] branch = "master" + digest = "1:c16945365aa2772ae7347e8d944ff38abd385bf217a75852f4b490e1af06b1aa" name = "github.com/mitchellh/copystructure" packages = ["."] + pruneopts = "NUT" revision = "f81071c9d77b7931f78c90b416a074ecdc50e959" [[projects]] branch = "master" + digest = "1:b62c4f18ad6eb454ac5253e7791ded3d7867330015ca4b37b6336e57f514585e" name = "github.com/mitchellh/go-homedir" packages = ["."] + pruneopts = "NUT" revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" [[projects]] branch = "master" + digest = "1:77ae0dd3bf0743d1baf1918b01858d423cf578826d70c8b59092ab52e4b9dfb9" name = "github.com/mitchellh/mapstructure" packages = ["."] + pruneopts = "NUT" revision = "5a0325d7fafaac12dda6e7fb8bd222ec1b69875e" [[projects]] branch = "master" + digest = "1:08893d896360bc28ab00692d16a1df1bea0bc8f52da93a8bea7b4d46e141c5c6" name = "github.com/mitchellh/reflectwalk" packages = ["."] + pruneopts = "NUT" revision = "417edcfd99a4d472c262e58f22b4bfe97580f03e" [[projects]] + digest = "1:c7754aaef62e30fefcc59727dcd71aea29e3c3868df0bd316bfe62e05131fafb" name = "github.com/pborman/uuid" packages = ["."] + pruneopts = "NUT" revision = "a97ce2ca70fa5a848076093f05e639a89ca34d06" version = "v1.0" [[projects]] + digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" name = "github.com/pkg/errors" packages = ["."] + pruneopts = "NUT" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" [[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" name = "github.com/pmezard/go-difflib" packages = ["difflib"] + pruneopts = "NUT" revision = "792786c7400a136282c1664665ae0a8db921c6c2" version = "v1.0.0" [[projects]] + digest = "1:3e5fd795ebf6a9e13e67d644da76130af7a6003286531f9573f8074c228b66a3" name = "github.com/prometheus/client_golang" packages = ["prometheus"] + pruneopts = "NUT" revision = "c5b7fccd204277076155f10851dad72b76a49317" version = "v0.8.0" [[projects]] branch = "master" + digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b" name = "github.com/prometheus/client_model" packages = ["go"] + pruneopts = "NUT" revision = "6f3806018612930941127f2a7c6c453ba2c527d2" [[projects]] branch = "master" + digest = "1:4f7ffe5f45a74be4c6b116a0ff2e435edce68d639becebc1ecaaa22487e177bb" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", "log", "model", - "version" + "version", ] + pruneopts = "NUT" revision = "9e0844febd9e2856f839c9cb974fbd676d1755a8" [[projects]] branch = "master" + digest = "1:2a011811c4dae8274c3ced565b10f30ab9f43cb799e5a425607464c0c585c1c3" name = "github.com/prometheus/procfs" packages = [ ".", - "xfs" + "xfs", ] + pruneopts = "NUT" revision = "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad" [[projects]] - branch = "master" + branch = 
"logger-targetmanager-wait" + digest = "1:2aa5c16ddde403c807c8e4ddba9b3e31faac62ec86e2df6afafe032419f7d055" name = "github.com/prometheus/prometheus" packages = [ "config", @@ -557,67 +694,90 @@ "util/httputil", "util/strutil", "util/testutil", - "util/treecache" + "util/treecache", ] + pruneopts = "NUT" revision = "58298e738211f46cdab48c404e5514a544774579" source = "github.com/goller/prometheus" [[projects]] + digest = "1:118f00f400c10c1dd21a267fd04697c758dbe9a38cf5fa3fab3bb3625af9efe7" name = "github.com/russross/blackfriday" packages = ["."] + pruneopts = "NUT" revision = "0b647d0506a698cca42caca173e55559b12a69f2" version = "v1.4" [[projects]] branch = "master" + digest = "1:142520cf3c9bb85449dd0000f820b8c604531587ee654793c54909be7dabadac" name = "github.com/samuel/go-zookeeper" packages = ["zk"] + pruneopts = "NUT" revision = "1d7be4effb13d2d908342d349d71a284a7542693" [[projects]] - branch = "master" + digest = "1:b4b2e722848c38227ef9b926b7f93ba150d80e18fcf4aa3d1ea4830ca0de61a4" name = "github.com/segmentio/kafka-go" - packages = ["."] - revision = "140b1a903e1421f33b16a24312a19ff024e13625" + packages = [ + ".", + "sasl", + ] + pruneopts = "NUT" + revision = "531c50bb11447b43c02f52718f9213631921fad3" + version = "v0.3.3" [[projects]] branch = "master" + digest = "1:e700de914d366e75d5711582669407619dd26746cb80b0f6bd2cb0e8f0ec18c1" name = "github.com/serenize/snaker" packages = ["."] + pruneopts = "NUT" revision = "543781d2b79bd95c51ffe70e70a55c946ca211ff" [[projects]] branch = "master" + digest = "1:da6203ec5679b8d7af6fe2dbcd694694b9af883a6bae7f702ec5a11815301e94" name = "github.com/shurcooL/go" packages = ["indentwriter"] + pruneopts = "NUT" revision = "20b4b0a352116a106a505a8c528b6513e7e0d5c2" [[projects]] branch = "master" + digest = "1:81fc235daf173fbcabfda91e1d5d1d918a057f33e30b57dc72fa6091047ac718" name = "github.com/shurcooL/markdownfmt" packages = ["markdown"] + pruneopts = "NUT" revision = "10aae0a270abfb5d929ae6ca59c4b0ac0fa8f237" [[projects]] branch = "master" + digest = "1:400359f0b394fb168f4aee9621d42cc005810c6e462009d5fc76055d5e96dcf3" name = "github.com/shurcooL/sanitized_anchor_name" packages = ["."] + pruneopts = "NUT" revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2" [[projects]] branch = "master" + digest = "1:75d8ef7fd5eb922263b8c8f30aae17ddd6876ed3bc52c17d8406553d123f8770" name = "github.com/spf13/pflag" packages = ["."] + pruneopts = "NUT" revision = "2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51" [[projects]] + digest = "1:b5c8b4a0ad5f65a85eb2a9f89e30c638ef8b99f8a3f078467cea778869757666" name = "github.com/stretchr/testify" packages = ["assert"] + pruneopts = "NUT" revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" version = "v1.1.4" [[projects]] branch = "master" + digest = "1:9137e62a44c92cd9fe7d563b8480a6fbb2cd97815ef194a37696d9635a6a4056" name = "github.com/syndtr/goleveldb" packages = [ "leveldb", @@ -631,27 +791,33 @@ "leveldb/opt", "leveldb/storage", "leveldb/table", - "leveldb/util" + "leveldb/util", ] + pruneopts = "NUT" revision = "8c81ea47d4c41a385645e133e15510fc6a2a74b4" [[projects]] branch = "master" + digest = "1:af13b22439d69b477caf532439fdcbb3e2a18bde35d3135003f8be46bb8d8967" name = "github.com/ugorji/go" packages = ["codec"] + pruneopts = "NUT" revision = "708a42d246822952f38190a8d8c4e6b16a0e600c" [[projects]] branch = "master" + digest = "1:95100f4ac20072567aa10607f61c66ff28965659f77f6ba807bd875a685710d9" name = "golang.org/x/crypto" packages = [ "bcrypt", - "blowfish" + "blowfish", ] + pruneopts = "NUT" revision = 
"0242f07995e684be54f2a2776327141acf1cef91" [[projects]] branch = "master" + digest = "1:9c7239e01cf1289afb7460a80b820bcaa5638add06e5cf55e676950b414c817d" name = "golang.org/x/net" packages = [ "context", @@ -662,35 +828,41 @@ "internal/timeseries", "lex/httplex", "trace", - "websocket" + "websocket", ] + pruneopts = "NUT" revision = "d212a1ef2de2f5d441c327b8f26cf3ea3ea9f265" [[projects]] branch = "master" + digest = "1:da311e132160fec8dfc9e659915b8f942e5563c27bbf3c45d2c9e67a1434ef65" name = "golang.org/x/oauth2" packages = [ ".", "google", "internal", "jws", - "jwt" + "jwt", ] + pruneopts = "NUT" revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" [[projects]] branch = "master" + digest = "1:49763f4a63eaef31cde11bc013e9d250e294fb5fc8b02b9c93807fc430106cf9" name = "golang.org/x/sys" packages = [ "unix", "windows", "windows/registry", - "windows/svc/eventlog" + "windows/svc/eventlog", ] + pruneopts = "NUT" revision = "f3918c30c5c2cb527c0b071a27c35120a6c0719a" [[projects]] branch = "master" + digest = "1:35e546e3d2af2735e23a5698e0d5c87f7b0ca5683b3fbc25a10bcf397e032d19" name = "golang.org/x/text" packages = [ "internal/gen", @@ -702,22 +874,26 @@ "unicode/cldr", "unicode/norm", "unicode/rangetable", - "width" + "width", ] + pruneopts = "NUT" revision = "a9a820217f98f7c8a207ec1e45a874e1fe12c478" [[projects]] branch = "master" + digest = "1:9f32afa47f2da74cef7fd3ace0c5b8bf7476f432e4a1c7163ecdd22cf17154b8" name = "google.golang.org/api" packages = [ "compute/v1", "gensupport", "googleapi", - "googleapi/internal/uritemplates" + "googleapi/internal/uritemplates", ] + pruneopts = "NUT" revision = "fbbaff1827317122a8a0e1b24de25df8417ce87b" [[projects]] + digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9" name = "google.golang.org/appengine" packages = [ ".", @@ -729,12 +905,14 @@ "internal/modules", "internal/remote_api", "internal/urlfetch", - "urlfetch" + "urlfetch", ] + pruneopts = "NUT" revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" version = "v1.0.0" [[projects]] + digest = "1:73ac483b9160d55bbdaa4ca261234e391bd14ac69cfb172bead53e421197a0f0" name = "google.golang.org/grpc" packages = [ ".", @@ -748,42 +926,54 @@ "peer", "stats", "tap", - "transport" + "transport", ] + pruneopts = "NUT" revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d" version = "v1.2.1" [[projects]] branch = "v3" + digest = "1:1244a9b3856f70d5ffb74bbfd780fc9d47f93f2049fa265c6fb602878f507bf8" name = "gopkg.in/alexcesaro/quotedprintable.v3" packages = ["."] + pruneopts = "NUT" revision = "2caba252f4dc53eaf6b553000885530023f54623" [[projects]] + digest = "1:c970218a20933dd0a2eb2006de922217fa9276f57d25009b2a934eb1c50031cc" name = "gopkg.in/fsnotify.v1" packages = ["."] + pruneopts = "NUT" revision = "629574ca2a5df945712d3079857300b5e4da0236" source = "git@github.com:fsnotify/fsnotify" [[projects]] + digest = "1:d852dd703c644c976246382fe1539e8585cc20d642d3e68d3dff8de952237497" name = "gopkg.in/gomail.v2" packages = ["."] + pruneopts = "NUT" revision = "41f3572897373c5538c50a2402db15db079fa4fd" version = "2.0.0" [[projects]] + digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185" name = "gopkg.in/inf.v0" packages = ["."] + pruneopts = "NUT" revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" version = "v0.9.0" [[projects]] branch = "v2" + digest = "1:ad6f94355d292690137613735965bd3688844880fdab90eccf66321910344942" name = "gopkg.in/yaml.v2" packages = ["."] + pruneopts = "NUT" revision = "a5b47d31c556af34a302ce5d659e6fea44d90de0" [[projects]] + digest = 
"1:250d6f7f059d3ebceea5e6deed3561878b0c9807d5c17d58f7ca1890e4193f88" name = "k8s.io/client-go" packages = [ "1.5/discovery", @@ -888,14 +1078,82 @@ "1.5/tools/cache", "1.5/tools/clientcmd/api", "1.5/tools/metrics", - "1.5/transport" + "1.5/transport", ] + pruneopts = "NUT" revision = "1195e3a8ee1a529d53eed7c624527a68555ddf1f" version = "v1.5.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "56af1e788a09d8f7de6794281d692aa2de5b7d4e8c930e67c33f05e902300389" + input-imports = [ + "github.com/BurntSushi/toml", + "github.com/aws/aws-sdk-go/aws", + "github.com/aws/aws-sdk-go/aws/awserr", + "github.com/aws/aws-sdk-go/aws/credentials", + "github.com/aws/aws-sdk-go/aws/session", + "github.com/aws/aws-sdk-go/service/autoscaling", + "github.com/benbjohnson/tmpl", + "github.com/boltdb/bolt", + "github.com/cenkalti/backoff", + "github.com/davecgh/go-spew/spew", + "github.com/dgrijalva/jwt-go", + "github.com/docker/docker/api/types", + "github.com/docker/docker/api/types/swarm", + "github.com/dustin/go-humanize", + "github.com/eclipse/paho.mqtt.golang", + "github.com/evanphx/json-patch", + "github.com/ghodss/yaml", + "github.com/golang/protobuf/proto", + "github.com/golang/protobuf/protoc-gen-go", + "github.com/google/go-cmp/cmp", + "github.com/google/go-cmp/cmp/cmpopts", + "github.com/google/uuid", + "github.com/gorhill/cronexpr", + "github.com/influxdata/influxdb", + "github.com/influxdata/influxdb/client", + "github.com/influxdata/influxdb/client/v2", + "github.com/influxdata/influxdb/influxql", + "github.com/influxdata/influxdb/models", + "github.com/influxdata/influxdb/services/collectd", + "github.com/influxdata/influxdb/services/graphite", + "github.com/influxdata/influxdb/services/meta", + "github.com/influxdata/influxdb/services/opentsdb", + "github.com/influxdata/influxdb/services/udp", + "github.com/influxdata/influxdb/toml", + "github.com/influxdata/influxdb/uuid", + "github.com/influxdata/usage-client/v1", + "github.com/influxdata/wlog", + "github.com/k-sone/snmpgo", + "github.com/mailru/easyjson", + "github.com/mailru/easyjson/easyjson", + "github.com/mailru/easyjson/jlexer", + "github.com/mailru/easyjson/jwriter", + "github.com/mitchellh/copystructure", + "github.com/mitchellh/mapstructure", + "github.com/mitchellh/reflectwalk", + "github.com/pkg/errors", + "github.com/prometheus/common/log", + "github.com/prometheus/common/model", + "github.com/prometheus/prometheus/config", + "github.com/prometheus/prometheus/discovery", + "github.com/prometheus/prometheus/discovery/azure", + "github.com/prometheus/prometheus/discovery/consul", + "github.com/prometheus/prometheus/discovery/dns", + "github.com/prometheus/prometheus/discovery/ec2", + "github.com/prometheus/prometheus/discovery/file", + "github.com/prometheus/prometheus/discovery/gce", + "github.com/prometheus/prometheus/discovery/marathon", + "github.com/prometheus/prometheus/discovery/triton", + "github.com/prometheus/prometheus/discovery/zookeeper", + "github.com/prometheus/prometheus/retrieval", + "github.com/prometheus/prometheus/storage", + "github.com/segmentio/kafka-go", + "github.com/serenize/snaker", + "github.com/shurcooL/markdownfmt/markdown", + "github.com/stretchr/testify/assert", + "gopkg.in/gomail.v2", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/mailru/easyjson/parser/parser_windows.go b/vendor/github.com/mailru/easyjson/parser/parser_windows.go index a59c3e2c2..7c26f142a 100644 --- a/vendor/github.com/mailru/easyjson/parser/parser_windows.go +++ 
b/vendor/github.com/mailru/easyjson/parser/parser_windows.go @@ -9,7 +9,7 @@ import ( ) func normalizePath(path string) string { - // use lower case, as Windows file systems will almost always be case insensitive + // use lower case, as Windows file systems will almost always be case insensitive return strings.ToLower(strings.Replace(path, "\\", "/", -1)) } diff --git a/vendor/github.com/segmentio/kafka-go/balancer.go b/vendor/github.com/segmentio/kafka-go/balancer.go index cac92417e..467c52185 100644 --- a/vendor/github.com/segmentio/kafka-go/balancer.go +++ b/vendor/github.com/segmentio/kafka-go/balancer.go @@ -2,7 +2,9 @@ package kafka import ( "hash" + "hash/crc32" "hash/fnv" + "math/rand" "sort" "sync" ) @@ -132,15 +134,23 @@ var ( type Hash struct { rr RoundRobin Hasher hash.Hash32 + + // lock protects Hasher while calculating the hash code. It is assumed that + // the Hasher field is read-only once the Balancer is created, so as a + // performance optimization, reads of the field are not protected. + lock sync.Mutex } -func (h *Hash) Balance(msg Message, partitions ...int) (partition int) { +func (h *Hash) Balance(msg Message, partitions ...int) int { if msg.Key == nil { return h.rr.Balance(msg, partitions...) } hasher := h.Hasher - if hasher == nil { + if hasher != nil { + h.lock.Lock() + defer h.lock.Unlock() + } else { hasher = fnv1aPool.Get().(hash.Hash32) defer fnv1aPool.Put(hasher) } @@ -151,10 +161,137 @@ func (h *Hash) Balance(msg Message, partitions ...int) (partition int) { } // uses same algorithm that Sarama's hashPartitioner uses - partition = int(hasher.Sum32()) % len(partitions) + // note the type conversions here. if the uint32 hash code is not cast to + // an int32, we do not get the same result as sarama. + partition := int32(hasher.Sum32()) % int32(len(partitions)) if partition < 0 { partition = -partition } - return + return int(partition) +} + +type randomBalancer struct { + mock int // mocked return value, used for testing +} + +func (b randomBalancer) Balance(msg Message, partitions ...int) (partition int) { + if b.mock != 0 { + return b.mock + } + return partitions[rand.Int()%len(partitions)] +} + +// CRC32Balancer is a Balancer that uses the CRC32 hash function to determine +// which partition to route messages to. This ensures that messages with the +// same key are routed to the same partition. This balancer is compatible with +// the built-in hash partitioners in librdkafka and the language bindings that +// are built on top of it, including the +// github.com/confluentinc/confluent-kafka-go Go package. +// +// With the Consistent field false (default), this partitioner is equivalent to +// the "consistent_random" setting in librdkafka. When Consistent is true, this +// partitioner is equivalent to the "consistent" setting. The latter will hash +// empty or nil keys into the same partition. +// +// Unless you are absolutely certain that all your messages will have keys, it's +// best to leave the Consistent flag off. Otherwise, you run the risk of +// creating a very hot partition. +type CRC32Balancer struct { + Consistent bool + random randomBalancer +} + +func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) { + // NOTE: the crc32 balancers in librdkafka don't differentiate between nil + // and empty keys. both cases are treated as unset. + if len(msg.Key) == 0 && !b.Consistent { + return b.random.Balance(msg, partitions...) 
+ } + + idx := crc32.ChecksumIEEE(msg.Key) % uint32(len(partitions)) + return partitions[idx] +} + +// Murmur2Balancer is a Balancer that uses the Murmur2 hash function to +// determine which partition to route messages to. This ensures that messages +// with the same key are routed to the same partition. This balancer is +// compatible with the partitioner used by the Java library and by librdkafka's +// "murmur2" and "murmur2_random" partitioners. / +// +// With the Consistent field false (default), this partitioner is equivalent to +// the "murmur2_random" setting in librdkafka. When Consistent is true, this +// partitioner is equivalent to the "murmur2" setting. The latter will hash +// nil keys into the same partition. Empty, non-nil keys are always hashed to +// the same partition regardless of configuration. +// +// Unless you are absolutely certain that all your messages will have keys, it's +// best to leave the Consistent flag off. Otherwise, you run the risk of +// creating a very hot partition. +// +// Note that the librdkafka documentation states that the "murmur2_random" is +// functionally equivalent to the default Java partitioner. That's because the +// Java partitioner will use a round robin balancer instead of random on nil +// keys. We choose librdkafka's implementation because it arguably has a larger +// install base. +type Murmur2Balancer struct { + Consistent bool + random randomBalancer +} + +func (b Murmur2Balancer) Balance(msg Message, partitions ...int) (partition int) { + // NOTE: the murmur2 balancers in java and librdkafka treat a nil key as + // non-existent while treating an empty slice as a defined value. + if msg.Key == nil && !b.Consistent { + return b.random.Balance(msg, partitions...) + } + + idx := (murmur2(msg.Key) & 0x7fffffff) % uint32(len(partitions)) + return partitions[idx] +} + +// Go port of the Java library's murmur2 function. +// https://github.com/apache/kafka/blob/1.0/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L353 +func murmur2(data []byte) uint32 { + length := len(data) + const ( + seed uint32 = 0x9747b28c + // 'm' and 'r' are mixing constants generated offline. + // They're not really 'magic', they just happen to work well. 
+ m = 0x5bd1e995 + r = 24 + ) + + // Initialize the hash to a random value + h := seed ^ uint32(length) + length4 := length / 4 + + for i := 0; i < length4; i++ { + i4 := i * 4 + k := (uint32(data[i4+0]) & 0xff) + ((uint32(data[i4+1]) & 0xff) << 8) + ((uint32(data[i4+2]) & 0xff) << 16) + ((uint32(data[i4+3]) & 0xff) << 24) + k *= m + k ^= k >> r + k *= m + h *= m + h ^= k + } + + // Handle the last few bytes of the input array + extra := length % 4 + if extra >= 3 { + h ^= (uint32(data[(length & ^3)+2]) & 0xff) << 16 + } + if extra >= 2 { + h ^= (uint32(data[(length & ^3)+1]) & 0xff) << 8 + } + if extra >= 1 { + h ^= uint32(data[length & ^3]) & 0xff + h *= m + } + + h ^= h >> 13 + h *= m + h ^= h >> 15 + + return h } diff --git a/vendor/github.com/segmentio/kafka-go/batch.go b/vendor/github.com/segmentio/kafka-go/batch.go index 227024e85..8fc6c4e33 100644 --- a/vendor/github.com/segmentio/kafka-go/batch.go +++ b/vendor/github.com/segmentio/kafka-go/batch.go @@ -20,10 +20,9 @@ type Batch struct { mutex sync.Mutex conn *Conn lock *sync.Mutex - reader *bufio.Reader + msgs *messageSetReader deadline time.Time throttle time.Duration - remain int topic string partition int offset int64 @@ -65,7 +64,9 @@ func (batch *Batch) close() (err error) { batch.conn = nil batch.lock = nil - batch.discard(batch.remain) + if batch.msgs != nil { + batch.msgs.discard() + } if err = batch.err; err == io.EOF { err = nil @@ -91,6 +92,19 @@ func (batch *Batch) close() (err error) { return } +// Err returns a non-nil error if the batch is broken. This is the same error +// that would be returned by Read, ReadMessage or Close (except in the case of +// io.EOF which is never returned by Close). +// +// This method is useful when building retry mechanisms for (*Conn).ReadBatch, +// the program can check whether the batch carried a error before attempting to +// read the first message. +// +// Note that checking errors on a batch is optional, calling Read or ReadMessage +// is always valid and can be used to either read a message or an error in cases +// where that's convenient. +func (batch *Batch) Err() error { return batch.err } + // Read reads the value of the next message from the batch into b, returning the // number of bytes read, or an error if the next message couldn't be read. // @@ -107,7 +121,7 @@ func (batch *Batch) Read(b []byte) (int, error) { batch.mutex.Lock() offset := batch.offset - _, _, err := batch.readMessage( + _, _, _, err := batch.readMessage( func(r *bufio.Reader, size int, nbytes int) (int, error) { if nbytes < 0 { return size, nil @@ -118,9 +132,17 @@ func (batch *Batch) Read(b []byte) (int, error) { if nbytes < 0 { return size, nil } + // make sure there are enough bytes for the message value. return + // errShortRead if the message is truncated. 
+ if nbytes > size { + return size, errShortRead + } n = nbytes // return value + if nbytes > cap(b) { + nbytes = cap(b) + } if nbytes > len(b) { - nbytes = len(b) + b = b[:nbytes] } nbytes, err := io.ReadFull(r, b[:nbytes]) if err != nil { @@ -149,7 +171,11 @@ func (batch *Batch) ReadMessage() (Message, error) { msg := Message{} batch.mutex.Lock() - offset, timestamp, err := batch.readMessage( + var offset, timestamp int64 + var headers []Header + var err error + + offset, timestamp, headers, err = batch.readMessage( func(r *bufio.Reader, size int, nbytes int) (remain int, err error) { msg.Key, remain, err = readNewBytes(r, size, nbytes) return @@ -159,30 +185,41 @@ func (batch *Batch) ReadMessage() (Message, error) { return }, ) + for batch.conn != nil && offset < batch.conn.offset { + if err != nil { + break + } + offset, timestamp, headers, err = batch.readMessage( + func(r *bufio.Reader, size int, nbytes int) (remain int, err error) { + msg.Key, remain, err = readNewBytes(r, size, nbytes) + return + }, + func(r *bufio.Reader, size int, nbytes int) (remain int, err error) { + msg.Value, remain, err = readNewBytes(r, size, nbytes) + return + }, + ) + } batch.mutex.Unlock() msg.Topic = batch.topic msg.Partition = batch.partition msg.Offset = offset msg.Time = timestampToTime(timestamp) + msg.Headers = headers + return msg, err } func (batch *Batch) readMessage( key func(*bufio.Reader, int, int) (int, error), val func(*bufio.Reader, int, int) (int, error), -) (offset int64, timestamp int64, err error) { +) (offset int64, timestamp int64, headers []Header, err error) { if err = batch.err; err != nil { return } - offset, timestamp, batch.remain, err = readMessage( - batch.reader, - batch.remain, - batch.offset, - key, val, - ) - + offset, timestamp, headers, err = batch.msgs.readMessage(batch.offset, key, val) switch err { case nil: batch.offset = offset + 1 @@ -190,7 +227,21 @@ func (batch *Batch) readMessage( // As an "optimization" kafka truncates the returned response after // producing MaxBytes, which could then cause the code to return // errShortRead. - err = batch.discard(batch.remain) + err = batch.msgs.discard() + switch { + case err != nil: + batch.err = err + case batch.msgs.remaining() == 0: + // Because we use the adjusted deadline we could end up returning + // before the actual deadline occurred. This is necessary otherwise + // timing out the connection for real could end up leaving it in an + // unpredictable state, which would require closing it. + // This design decision was made to maximize the chances of keeping + // the connection open, the trade off being to lose precision on the + // read deadline management. + err = checkTimeoutErr(batch.deadline) + batch.err = err + } default: batch.err = err } @@ -198,25 +249,11 @@ func (batch *Batch) readMessage( return } -func (batch *Batch) discard(n int) (err error) { - batch.remain, err = discardN(batch.reader, batch.remain, n) - switch { - case err != nil: - batch.err = err - case batch.err == nil && batch.remain == 0: - // Because we use the adjusted deadline we could end up returning - // before the actual deadline occurred. This is necessary otherwise - // timing out the connection for real could end up leaving it in an - // unpredictable state, which would require closing it. - // This design decision was main to maximize the changes of keeping - // the connection open, the trade off being to lose precision on the - // read deadline management. 
- if !batch.deadline.IsZero() && time.Now().After(batch.deadline) { - err = RequestTimedOut - } else { - err = io.EOF - } - batch.err = err +func checkTimeoutErr(deadline time.Time) (err error) { + if !deadline.IsZero() && time.Now().After(deadline) { + err = RequestTimedOut + } else { + err = io.EOF } return } diff --git a/vendor/github.com/segmentio/kafka-go/buffer.go b/vendor/github.com/segmentio/kafka-go/buffer.go new file mode 100644 index 000000000..5bf50c05f --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/buffer.go @@ -0,0 +1,27 @@ +package kafka + +import ( + "bytes" + "sync" +) + +var bufferPool = sync.Pool{ + New: func() interface{} { return newBuffer() }, +} + +func newBuffer() *bytes.Buffer { + b := new(bytes.Buffer) + b.Grow(65536) + return b +} + +func acquireBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func releaseBuffer(b *bytes.Buffer) { + if b != nil { + b.Reset() + bufferPool.Put(b) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/client.go b/vendor/github.com/segmentio/kafka-go/client.go new file mode 100644 index 000000000..e4a8f6474 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/client.go @@ -0,0 +1,165 @@ +package kafka + +import ( + "context" + "fmt" +) + +// Client is a new and experimental API for kafka-go. It is expected that this API will grow over time, +// and offer a new set of "mid-level" capabilities. Specifically, it is expected Client will be a higher level API than Conn, +// yet provide more control and lower level operations than the Reader and Writer APIs. +// +// N.B Client is currently experimental! Therefore, it is subject to change, including breaking changes +// between MINOR and PATCH releases. +type Client struct { + brokers []string + dialer *Dialer +} + +// Configuration for Client +// +// N.B ClientConfig is currently experimental! Therefore, it is subject to change, including breaking changes +// between MINOR and PATCH releases. +type ClientConfig struct { + // List of broker strings in the format : + // to use for bootstrap connecting to cluster + Brokers []string + // Dialer used for connecting to the Cluster + Dialer *Dialer +} + +// A ConsumerGroup and Topic as these are both strings +// we define a type for clarity when passing to the Client +// as a function argument +// +// N.B TopicAndGroup is currently experimental! Therefore, it is subject to change, including breaking changes +// between MINOR and PATCH releases. +type TopicAndGroup struct { + Topic string + GroupId string +} + +// NewClient creates and returns a *Client taking ...string of bootstrap +// brokers for connecting to the cluster. +func NewClient(brokers ...string) *Client { + return NewClientWith(ClientConfig{Brokers: brokers, Dialer: DefaultDialer}) +} + +// NewClientWith creates and returns a *Client. For safety, it copies the []string of bootstrap +// brokers for connecting to the cluster and uses the user supplied Dialer. +// In the event the Dialer is nil, we use the DefaultDialer. 
+func NewClientWith(config ClientConfig) *Client { + if len(config.Brokers) == 0 { + panic("must provide at least one broker") + } + + b := make([]string, len(config.Brokers)) + copy(b, config.Brokers) + d := config.Dialer + if d == nil { + d = DefaultDialer + } + + return &Client{ + brokers: b, + dialer: d, + } +} + +// ConsumerOffsets returns a map[int]int64 of partition to committed offset for a consumer group id and topic +func (c *Client) ConsumerOffsets(ctx context.Context, tg TopicAndGroup) (map[int]int64, error) { + address, err := c.lookupCoordinator(tg.GroupId) + if err != nil { + return nil, err + } + + conn, err := c.coordinator(ctx, address) + if err != nil { + return nil, err + } + + defer conn.Close() + partitions, err := conn.ReadPartitions(tg.Topic) + if err != nil { + return nil, err + } + + var parts []int32 + for _, p := range partitions { + parts = append(parts, int32(p.ID)) + } + + offsets, err := conn.offsetFetch(offsetFetchRequestV1{ + GroupID: tg.GroupId, + Topics: []offsetFetchRequestV1Topic{ + { + Topic: tg.Topic, + Partitions: parts, + }, + }, + }) + + if err != nil { + return nil, err + } + + if len(offsets.Responses) != 1 { + return nil, fmt.Errorf("error fetching offsets, no responses received") + } + + offsetsByPartition := map[int]int64{} + for _, pr := range offsets.Responses[0].PartitionResponses { + offset := pr.Offset + if offset < 0 { + // No offset stored + // -1 indicates that there is no offset saved for the partition. + // If we returned a -1 here the user might interpret that as LastOffset + // so we set to Firstoffset for safety. + // See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch + offset = FirstOffset + } + offsetsByPartition[int(pr.Partition)] = offset + } + + return offsetsByPartition, nil +} + +// connect returns a connection to ANY broker +func (c *Client) connect() (conn *Conn, err error) { + for _, broker := range c.brokers { + if conn, err = c.dialer.Dial("tcp", broker); err == nil { + return + } + } + return // err will be non-nil +} + +// coordinator returns a connection to a coordinator +func (c *Client) coordinator(ctx context.Context, address string) (*Conn, error) { + conn, err := c.dialer.DialContext(ctx, "tcp", address) + if err != nil { + return nil, fmt.Errorf("unable to connect to coordinator, %v", address) + } + + return conn, nil +} + +// lookupCoordinator scans the brokers and looks up the address of the +// coordinator for the groupId. 
+func (c *Client) lookupCoordinator(groupId string) (string, error) { + conn, err := c.connect() + if err != nil { + return "", fmt.Errorf("unable to find coordinator to any connect for group, %v: %v\n", groupId, err) + } + defer conn.Close() + + out, err := conn.findCoordinator(findCoordinatorRequestV0{ + CoordinatorKey: groupId, + }) + if err != nil { + return "", fmt.Errorf("unable to find coordinator for group, %v: %v", groupId, err) + } + + address := fmt.Sprintf("%v:%v", out.Coordinator.Host, out.Coordinator.Port) + return address, nil +} diff --git a/vendor/github.com/segmentio/kafka-go/compression.go b/vendor/github.com/segmentio/kafka-go/compression.go new file mode 100644 index 000000000..c415ff436 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/compression.go @@ -0,0 +1,58 @@ +package kafka + +import ( + "errors" + "io" + "sync" +) + +const ( + compressionCodecMask = 0x07 +) + +var ( + errUnknownCodec = errors.New("the compression code is invalid or its codec has not been imported") + + codecs = make(map[int8]CompressionCodec) + codecsMutex sync.RWMutex +) + +// RegisterCompressionCodec registers a compression codec so it can be used by a Writer. +func RegisterCompressionCodec(codec CompressionCodec) { + code := codec.Code() + codecsMutex.Lock() + codecs[code] = codec + codecsMutex.Unlock() +} + +// resolveCodec looks up a codec by Code() +func resolveCodec(code int8) (codec CompressionCodec, err error) { + codecsMutex.RLock() + codec = codecs[code] + codecsMutex.RUnlock() + + if codec == nil { + err = errUnknownCodec + } + return +} + +// CompressionCodec represents a compression codec to encode and decode +// the messages. +// See : https://cwiki.apache.org/confluence/display/KAFKA/Compression +// +// A CompressionCodec must be safe for concurrent access by multiple go +// routines. +type CompressionCodec interface { + // Code returns the compression codec code + Code() int8 + + // Human-readable name for the codec. + Name() string + + // Constructs a new reader which decompresses data from r. + NewReader(r io.Reader) io.ReadCloser + + // Constructs a new writer which writes compressed data to w. + NewWriter(w io.Writer) io.WriteCloser +} diff --git a/vendor/github.com/segmentio/kafka-go/conn.go b/vendor/github.com/segmentio/kafka-go/conn.go index c6d829d1c..1b6e7649f 100644 --- a/vendor/github.com/segmentio/kafka-go/conn.go +++ b/vendor/github.com/segmentio/kafka-go/conn.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "io" "math" "net" "os" @@ -24,6 +25,7 @@ type Broker struct { Host string Port int ID int + Rack string } // Partition carries the metadata associated with a kafka partition. @@ -53,6 +55,7 @@ type Conn struct { // write buffer (synchronized on wlock) wlock sync.Mutex wbuf bufio.Writer + wb writeBuffer // deadline management wdeadline connDeadline @@ -69,7 +72,12 @@ type Conn struct { correlationID int32 // number of replica acks required when publishing to a partition - requiredAcks int32 + requiredAcks int32 + apiVersions map[apiKey]ApiVersion + fetchVersion apiVersion + produceVersion apiVersion + + transactionalID *string } // ConnConfig is a configuration object used to create new instances of Conn. @@ -77,8 +85,32 @@ type ConnConfig struct { ClientID string Topic string Partition int + + // The transactional id to use for transactional delivery. Idempotent + // deliver should be enabled if transactional id is configured. 
+ // For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs + // Empty string means that this connection can't be transactional. + TransactionalID string } +// ReadBatchConfig is a configuration object used for reading batches of messages. +type ReadBatchConfig struct { + MinBytes int + MaxBytes int + + // IsolationLevel controls the visibility of transactional records. + // ReadUncommitted makes all records visible. With ReadCommitted only + // non-transactional and committed records are visible. + IsolationLevel IsolationLevel +} + +type IsolationLevel int8 + +const ( + ReadUncommitted IsolationLevel = 0 + ReadCommitted IsolationLevel = 1 +) + var ( // DefaultClientID is the default value used as ClientID of kafka // connections. @@ -99,7 +131,15 @@ func NewConn(conn net.Conn, topic string, partition int) *Conn { }) } +func emptyToNullable(transactionalID string) (result *string) { + if transactionalID != "" { + result = &transactionalID + } + return result +} + // NewConnWith returns a new kafka connection configured with config. +// The offset is initialized to FirstOffset. func NewConnWith(conn net.Conn, config ConnConfig) *Conn { if len(config.ClientID) == 0 { config.ClientID = DefaultClientID @@ -110,35 +150,127 @@ func NewConnWith(conn net.Conn, config ConnConfig) *Conn { } c := &Conn{ - conn: conn, - rbuf: *bufio.NewReader(conn), - wbuf: *bufio.NewWriter(conn), - clientID: config.ClientID, - topic: config.Topic, - partition: int32(config.Partition), - offset: -2, - requiredAcks: -1, + conn: conn, + rbuf: *bufio.NewReader(conn), + wbuf: *bufio.NewWriter(conn), + clientID: config.ClientID, + topic: config.Topic, + partition: int32(config.Partition), + offset: FirstOffset, + requiredAcks: -1, + transactionalID: emptyToNullable(config.TransactionalID), } + c.wb.w = &c.wbuf + // The fetch request needs to ask for a MaxBytes value that is at least // enough to load the control data of the response. To avoid having to // recompute it on every read, it is cached here in the Conn value. 
- c.fetchMinSize = (fetchResponseV1{ - Topics: []fetchResponseTopicV1{{ + c.fetchMinSize = (fetchResponseV2{ + Topics: []fetchResponseTopicV2{{ TopicName: config.Topic, - Partitions: []fetchResponsePartitionV1{{ + Partitions: []fetchResponsePartitionV2{{ Partition: int32(config.Partition), MessageSet: messageSet{{}}, }}, }}, }).size() + c.selectVersions() c.fetchMaxBytes = math.MaxInt32 - c.fetchMinSize return c } +func (c *Conn) selectVersions() { + var err error + apiVersions, err := c.ApiVersions() + if err != nil { + c.apiVersions = defaultApiVersions + } else { + c.apiVersions = make(map[apiKey]ApiVersion) + for _, v := range apiVersions { + c.apiVersions[apiKey(v.ApiKey)] = v + } + } + for _, v := range c.apiVersions { + if apiKey(v.ApiKey) == fetchRequest { + switch version := v.MaxVersion; { + case version >= 10: + c.fetchVersion = 10 + case version >= 5: + c.fetchVersion = 5 + default: + c.fetchVersion = 2 + } + } + if apiKey(v.ApiKey) == produceRequest { + if v.MaxVersion >= 7 { + c.produceVersion = 7 + } else { + c.produceVersion = 2 + } + } + } +} + +// Controller requests kafka for the current controller and returns its URL +func (c *Conn) Controller() (broker Broker, err error) { + err = c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(metadataRequest, v1, id, topicMetadataRequestV1([]string{})) + }, + func(deadline time.Time, size int) error { + var res metadataResponseV1 + + if err := c.readResponse(size, &res); err != nil { + return err + } + for _, brokerMeta := range res.Brokers { + if brokerMeta.NodeID == res.ControllerID { + broker = Broker{ID: int(brokerMeta.NodeID), + Port: int(brokerMeta.Port), + Host: brokerMeta.Host, + Rack: brokerMeta.Rack} + break + } + } + return nil + }, + ) + return broker, err +} + +// Brokers retrieve the broker list from the Kafka metadata +func (c *Conn) Brokers() ([]Broker, error) { + var brokers []Broker + err := c.readOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(metadataRequest, v1, id, topicMetadataRequestV1([]string{})) + }, + func(deadline time.Time, size int) error { + var res metadataResponseV1 + + if err := c.readResponse(size, &res); err != nil { + return err + } + + brokers = make([]Broker, len(res.Brokers)) + for i, brokerMeta := range res.Brokers { + brokers[i] = Broker{ + ID: int(brokerMeta.NodeID), + Port: int(brokerMeta.Port), + Host: brokerMeta.Host, + Rack: brokerMeta.Rack, + } + } + return nil + }, + ) + return brokers, err +} + // DeleteTopics deletes the specified topics. 
func (c *Conn) DeleteTopics(topics ...string) error { - _, err := c.deleteTopics(deleteTopicsRequestV1{ + _, err := c.deleteTopics(deleteTopicsRequestV0{ Topics: topics, }) return err @@ -147,12 +279,12 @@ func (c *Conn) DeleteTopics(topics ...string) error { // describeGroups retrieves the specified groups // // See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups -func (c *Conn) describeGroups(request describeGroupsRequestV1) (describeGroupsResponseV1, error) { - var response describeGroupsResponseV1 +func (c *Conn) describeGroups(request describeGroupsRequestV0) (describeGroupsResponseV0, error) { + var response describeGroupsResponseV0 err := c.readOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(describeGroupsRequest, v1, id, request) + return c.writeRequest(describeGroupsRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -161,11 +293,11 @@ func (c *Conn) describeGroups(request describeGroupsRequestV1) (describeGroupsRe }, ) if err != nil { - return describeGroupsResponseV1{}, err + return describeGroupsResponseV0{}, err } for _, group := range response.Groups { if group.ErrorCode != 0 { - return describeGroupsResponseV1{}, Error(group.ErrorCode) + return describeGroupsResponseV0{}, Error(group.ErrorCode) } } @@ -175,12 +307,13 @@ func (c *Conn) describeGroups(request describeGroupsRequestV1) (describeGroupsRe // findCoordinator finds the coordinator for the specified group or transaction // // See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator -func (c *Conn) findCoordinator(request findCoordinatorRequestV1) (findCoordinatorResponseV1, error) { - var response findCoordinatorResponseV1 +func (c *Conn) findCoordinator(request findCoordinatorRequestV0) (findCoordinatorResponseV0, error) { + var response findCoordinatorResponseV0 err := c.readOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(groupCoordinatorRequest, v1, id, request) + return c.writeRequest(groupCoordinatorRequest, v0, id, request) + }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -189,10 +322,10 @@ func (c *Conn) findCoordinator(request findCoordinatorRequestV1) (findCoordinato }, ) if err != nil { - return findCoordinatorResponseV1{}, err + return findCoordinatorResponseV0{}, err } if response.ErrorCode != 0 { - return findCoordinatorResponseV1{}, Error(response.ErrorCode) + return findCoordinatorResponseV0{}, Error(response.ErrorCode) } return response, nil @@ -201,12 +334,12 @@ func (c *Conn) findCoordinator(request findCoordinatorRequestV1) (findCoordinato // heartbeat sends a heartbeat message required by consumer groups // // See http://kafka.apache.org/protocol.html#The_Messages_Heartbeat -func (c *Conn) heartbeat(request heartbeatRequestV1) (heartbeatResponseV1, error) { - var response heartbeatResponseV1 +func (c *Conn) heartbeat(request heartbeatRequestV0) (heartbeatResponseV0, error) { + var response heartbeatResponseV0 err := c.writeOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(heartbeatRequest, v1, id, request) + return c.writeRequest(heartbeatRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -215,10 +348,10 @@ func (c *Conn) heartbeat(request heartbeatRequestV1) (heartbeatResponseV1, error }, ) if err != nil { - return heartbeatResponseV1{}, err + return 
heartbeatResponseV0{}, err } if response.ErrorCode != 0 { - return heartbeatResponseV1{}, Error(response.ErrorCode) + return heartbeatResponseV0{}, Error(response.ErrorCode) } return response, nil @@ -227,12 +360,12 @@ func (c *Conn) heartbeat(request heartbeatRequestV1) (heartbeatResponseV1, error // joinGroup attempts to join a consumer group // // See http://kafka.apache.org/protocol.html#The_Messages_JoinGroup -func (c *Conn) joinGroup(request joinGroupRequestV2) (joinGroupResponseV2, error) { - var response joinGroupResponseV2 +func (c *Conn) joinGroup(request joinGroupRequestV1) (joinGroupResponseV1, error) { + var response joinGroupResponseV1 err := c.writeOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(joinGroupRequest, v2, id, request) + return c.writeRequest(joinGroupRequest, v1, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -241,10 +374,10 @@ func (c *Conn) joinGroup(request joinGroupRequestV2) (joinGroupResponseV2, error }, ) if err != nil { - return joinGroupResponseV2{}, err + return joinGroupResponseV1{}, err } if response.ErrorCode != 0 { - return joinGroupResponseV2{}, Error(response.ErrorCode) + return joinGroupResponseV1{}, Error(response.ErrorCode) } return response, nil @@ -253,12 +386,12 @@ func (c *Conn) joinGroup(request joinGroupRequestV2) (joinGroupResponseV2, error // leaveGroup leaves the consumer from the consumer group // // See http://kafka.apache.org/protocol.html#The_Messages_LeaveGroup -func (c *Conn) leaveGroup(request leaveGroupRequestV1) (leaveGroupResponseV1, error) { - var response leaveGroupResponseV1 +func (c *Conn) leaveGroup(request leaveGroupRequestV0) (leaveGroupResponseV0, error) { + var response leaveGroupResponseV0 err := c.writeOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(leaveGroupRequest, v1, id, request) + return c.writeRequest(leaveGroupRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -267,10 +400,10 @@ func (c *Conn) leaveGroup(request leaveGroupRequestV1) (leaveGroupResponseV1, er }, ) if err != nil { - return leaveGroupResponseV1{}, err + return leaveGroupResponseV0{}, err } if response.ErrorCode != 0 { - return leaveGroupResponseV1{}, Error(response.ErrorCode) + return leaveGroupResponseV0{}, Error(response.ErrorCode) } return response, nil @@ -305,12 +438,12 @@ func (c *Conn) listGroups(request listGroupsRequestV1) (listGroupsResponseV1, er // offsetCommit commits the specified topic partition offsets // // See http://kafka.apache.org/protocol.html#The_Messages_OffsetCommit -func (c *Conn) offsetCommit(request offsetCommitRequestV3) (offsetCommitResponseV3, error) { - var response offsetCommitResponseV3 +func (c *Conn) offsetCommit(request offsetCommitRequestV2) (offsetCommitResponseV2, error) { + var response offsetCommitResponseV2 err := c.writeOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(offsetCommitRequest, v3, id, request) + return c.writeRequest(offsetCommitRequest, v2, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -319,12 +452,12 @@ func (c *Conn) offsetCommit(request offsetCommitRequestV3) (offsetCommitResponse }, ) if err != nil { - return offsetCommitResponseV3{}, err + return offsetCommitResponseV2{}, err } for _, r := range response.Responses { for _, pr := range r.PartitionResponses { if pr.ErrorCode != 0 { 
- return offsetCommitResponseV3{}, Error(pr.ErrorCode) + return offsetCommitResponseV2{}, Error(pr.ErrorCode) } } } @@ -332,15 +465,16 @@ func (c *Conn) offsetCommit(request offsetCommitRequestV3) (offsetCommitResponse return response, nil } -// offsetFetch fetches the offsets for the specified topic partitions +// offsetFetch fetches the offsets for the specified topic partitions. +// -1 indicates that there is no offset saved for the partition. // // See http://kafka.apache.org/protocol.html#The_Messages_OffsetFetch -func (c *Conn) offsetFetch(request offsetFetchRequestV3) (offsetFetchResponseV3, error) { - var response offsetFetchResponseV3 +func (c *Conn) offsetFetch(request offsetFetchRequestV1) (offsetFetchResponseV1, error) { + var response offsetFetchResponseV1 err := c.readOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(offsetFetchRequest, v3, id, request) + return c.writeRequest(offsetFetchRequest, v1, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -349,15 +483,12 @@ func (c *Conn) offsetFetch(request offsetFetchRequestV3) (offsetFetchResponseV3, }, ) if err != nil { - return offsetFetchResponseV3{}, err - } - if response.ErrorCode != 0 { - return offsetFetchResponseV3{}, Error(response.ErrorCode) + return offsetFetchResponseV1{}, err } for _, r := range response.Responses { for _, pr := range r.PartitionResponses { if pr.ErrorCode != 0 { - return offsetFetchResponseV3{}, Error(pr.ErrorCode) + return offsetFetchResponseV1{}, Error(pr.ErrorCode) } } } @@ -365,15 +496,15 @@ func (c *Conn) offsetFetch(request offsetFetchRequestV3) (offsetFetchResponseV3, return response, nil } -// syncGroups completes the handshake to join a consumer group +// syncGroup completes the handshake to join a consumer group // // See http://kafka.apache.org/protocol.html#The_Messages_SyncGroup -func (c *Conn) syncGroups(request syncGroupRequestV1) (syncGroupResponseV1, error) { - var response syncGroupResponseV1 +func (c *Conn) syncGroup(request syncGroupRequestV0) (syncGroupResponseV0, error) { + var response syncGroupResponseV0 err := c.readOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(syncGroupRequest, v1, id, request) + return c.writeRequest(syncGroupRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -382,10 +513,10 @@ func (c *Conn) syncGroups(request syncGroupRequestV1) (syncGroupResponseV1, erro }, ) if err != nil { - return syncGroupResponseV1{}, err + return syncGroupResponseV0{}, err } if response.ErrorCode != 0 { - return syncGroupResponseV1{}, Error(response.ErrorCode) + return syncGroupResponseV0{}, Error(response.ErrorCode) } return response, nil @@ -449,31 +580,67 @@ func (c *Conn) Offset() (offset int64, whence int) { c.mutex.Lock() offset = c.offset c.mutex.Unlock() + switch offset { - case -1: + case FirstOffset: offset = 0 - whence = 2 - case -2: + whence = SeekStart + case LastOffset: offset = 0 - whence = 0 + whence = SeekEnd default: - whence = 1 + whence = SeekAbsolute } return } -// Seek changes the offset of the connection to offset, interpreted according to -// whence: 0 means relative to the first offset, 1 means relative to the current -// offset, and 2 means relative to the last offset. -// The method returns the new absoluate offset of the connection. +const ( + SeekStart = 0 // Seek relative to the first offset available in the partition. 
+ SeekAbsolute = 1 // Seek to an absolute offset. + SeekEnd = 2 // Seek relative to the last offset available in the partition. + SeekCurrent = 3 // Seek relative to the current offset. + + // This flag may be combined to any of the SeekAbsolute and SeekCurrent + // constants to skip the bound check that the connection would do otherwise. + // Programs can use this flag to avoid making a metadata request to the kafka + // broker to read the current first and last offsets of the partition. + SeekDontCheck = 1 << 30 +) + +// Seek sets the offset for the next read or write operation according to whence, which +// should be one of SeekStart, SeekAbsolute, SeekEnd, or SeekCurrent. +// When seeking relative to the end, the offset is subtracted from the current offset. +// Note that for historical reasons, these do not align with the usual whence constants +// as in lseek(2) or os.Seek. +// The method returns the new absolute offset of the connection. func (c *Conn) Seek(offset int64, whence int) (int64, error) { + seekDontCheck := (whence & SeekDontCheck) != 0 + whence &= ^SeekDontCheck + switch whence { - case 0, 1, 2: + case SeekStart, SeekAbsolute, SeekEnd, SeekCurrent: default: - return 0, fmt.Errorf("the whence value has to be 0, 1, or 2 (whence = %d)", whence) + return 0, fmt.Errorf("whence must be one of 0, 1, 2, or 3. (whence = %d)", whence) } - if whence == 1 { + if seekDontCheck { + if whence == SeekAbsolute { + c.mutex.Lock() + c.offset = offset + c.mutex.Unlock() + return offset, nil + } + + if whence == SeekCurrent { + c.mutex.Lock() + c.offset += offset + offset = c.offset + c.mutex.Unlock() + return offset, nil + } + } + + if whence == SeekAbsolute { c.mutex.Lock() unchanged := offset == c.offset c.mutex.Unlock() @@ -482,15 +649,21 @@ func (c *Conn) Seek(offset int64, whence int) (int64, error) { } } + if whence == SeekCurrent { + c.mutex.Lock() + offset = c.offset + offset + c.mutex.Unlock() + } + first, last, err := c.ReadOffsets() if err != nil { return 0, err } switch whence { - case 0: + case SeekStart: offset = first + offset - case 2: + case SeekEnd: offset = last - offset } @@ -560,21 +733,33 @@ func (c *Conn) ReadMessage(maxBytes int) (Message, error) { // A program doesn't specify the number of messages in wants from a batch, but // gives the minimum and maximum number of bytes that it wants to receive from // the kafka server. -func (c *Conn) ReadBatch(minBytes int, maxBytes int) *Batch { +func (c *Conn) ReadBatch(minBytes, maxBytes int) *Batch { + return c.ReadBatchWith(ReadBatchConfig{ + MinBytes: minBytes, + MaxBytes: maxBytes, + }) +} + +// ReadBatchWith in every way is similar to ReadBatch. ReadBatch is configured +// with the default values in ReadBatchConfig except for minBytes and maxBytes. 
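// Hedged sketch combining the Seek constants above with the ReadBatchWith
// entry point introduced just below: position the connection near the end of
// the partition, then fetch a batch with explicit byte bounds. Broker
// address, topic, and partition are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Seek 100 messages before the last offset; SeekEnd subtracts the value
	// from the last offset, which is looked up with a ListOffsets request.
	if _, err := conn.Seek(100, kafka.SeekEnd); err != nil {
		log.Fatal(err)
	}

	// SeekDontCheck skips that bounds lookup; an out-of-range offset only
	// surfaces later, when the batch is read.
	// _, _ = conn.Seek(42, kafka.SeekAbsolute|kafka.SeekDontCheck)

	batch := conn.ReadBatchWith(kafka.ReadBatchConfig{
		MinBytes: 1,       // respond as soon as a single byte is available
		MaxBytes: 1 << 20, // cap a single fetch response at ~1 MiB
	})
	defer batch.Close()

	for {
		msg, err := batch.ReadMessage()
		if err != nil {
			break // the batch ends with an error such as io.EOF
		}
		fmt.Printf("offset=%d key=%s value=%s\n", msg.Offset, msg.Key, msg.Value)
	}
}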
+func (c *Conn) ReadBatchWith(cfg ReadBatchConfig) *Batch { + var adjustedDeadline time.Time var maxFetch = int(c.fetchMaxBytes) - if minBytes < 0 || minBytes > maxFetch { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", minBytes, maxFetch)} + if cfg.MinBytes < 0 || cfg.MinBytes > maxFetch { + return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes of %d out of [1,%d] bounds", cfg.MinBytes, maxFetch)} } - if maxBytes < 0 || maxBytes > maxFetch { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", maxBytes, maxFetch)} + if cfg.MaxBytes < 0 || cfg.MaxBytes > maxFetch { + return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: maxBytes of %d out of [1,%d] bounds", cfg.MaxBytes, maxFetch)} } - if minBytes > maxBytes { - return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", minBytes, maxBytes)} + if cfg.MinBytes > cfg.MaxBytes { + return &Batch{err: fmt.Errorf("kafka.(*Conn).ReadBatch: minBytes (%d) > maxBytes (%d)", cfg.MinBytes, cfg.MaxBytes)} } - offset, err := c.Seek(c.Offset()) + offset, whence := c.Offset() + + offset, err := c.Seek(offset, whence|SeekDontCheck) if err != nil { return &Batch{err: dontExpectEOF(err)} } @@ -582,18 +767,43 @@ func (c *Conn) ReadBatch(minBytes int, maxBytes int) *Batch { id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error { now := time.Now() deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - adjustedDeadline = deadline - return writeFetchRequestV1( - &c.wbuf, - id, - c.clientID, - c.topic, - c.partition, - offset, - minBytes, - maxBytes+int(c.fetchMinSize), - deadlineToTimeout(deadline, now), - ) + switch c.fetchVersion { + case v10: + return c.wb.writeFetchRequestV10( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + deadlineToTimeout(deadline, now), + int8(cfg.IsolationLevel), + ) + case v5: + return c.wb.writeFetchRequestV5( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + deadlineToTimeout(deadline, now), + int8(cfg.IsolationLevel), + ) + default: + return c.wb.writeFetchRequestV2( + id, + c.clientID, + c.topic, + c.partition, + offset, + cfg.MinBytes, + cfg.MaxBytes+int(c.fetchMinSize), + deadlineToTimeout(deadline, now), + ) + } }) if err != nil { return &Batch{err: dontExpectEOF(err)} @@ -604,14 +814,39 @@ func (c *Conn) ReadBatch(minBytes int, maxBytes int) *Batch { return &Batch{err: dontExpectEOF(err)} } - throttle, highWaterMark, remain, err := readFetchResponseHeader(&c.rbuf, size) + var throttle int32 + var highWaterMark int64 + var remain int + + switch c.fetchVersion { + case v10: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV10(&c.rbuf, size) + case v5: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV5(&c.rbuf, size) + default: + throttle, highWaterMark, remain, err = readFetchResponseHeaderV2(&c.rbuf, size) + } + if err == errShortRead { + err = checkTimeoutErr(adjustedDeadline) + } + + var msgs *messageSetReader + if err == nil { + if highWaterMark == offset { + msgs = &messageSetReader{empty: true} + } else { + msgs, err = newMessageSetReader(&c.rbuf, remain) + } + } + if err == errShortRead { + err = checkTimeoutErr(adjustedDeadline) + } return &Batch{ conn: c, - reader: &c.rbuf, + msgs: msgs, deadline: adjustedDeadline, throttle: duration(throttle), lock: lock, - remain: remain, topic: c.topic, // topic is copied to 
Batch to prevent race with Batch.close partition: int(c.partition), // partition is copied to Batch to prevent race with Batch.close offset: offset, @@ -628,17 +863,17 @@ func (c *Conn) ReadOffset(t time.Time) (int64, error) { // ReadFirstOffset returns the first offset available on the connection. func (c *Conn) ReadFirstOffset() (int64, error) { - return c.readOffset(-2) + return c.readOffset(FirstOffset) } // ReadLastOffset returns the last offset available on the connection. func (c *Conn) ReadLastOffset() (int64, error) { - return c.readOffset(-1) + return c.readOffset(LastOffset) } // ReadOffsets returns the absolute first and last offsets of the topic used by // the connection. -func (c *Conn) ReadOffsets() (first int64, last int64, err error) { +func (c *Conn) ReadOffsets() (first, last int64, err error) { // We have to submit two different requests to fetch the first and last // offsets because kafka refuses requests that ask for multiple offsets // on the same topic and partition. @@ -655,7 +890,7 @@ func (c *Conn) ReadOffsets() (first int64, last int64, err error) { func (c *Conn) readOffset(t int64) (offset int64, err error) { err = c.readOperation( func(deadline time.Time, id int32) error { - return writeListOffsetRequestV1(&c.wbuf, id, c.clientID, c.topic, c.partition, t) + return c.wb.writeListOffsetRequestV1(id, c.clientID, c.topic, c.partition, t) }, func(deadline time.Time, size int) error { return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { @@ -701,10 +936,10 @@ func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err err err = c.readOperation( func(deadline time.Time, id int32) error { - return c.writeRequest(metadataRequest, v0, id, topicMetadataRequestV0(topics)) + return c.writeRequest(metadataRequest, v1, id, topicMetadataRequestV1(topics)) }, func(deadline time.Time, size int) error { - var res metadataResponseV0 + var res metadataResponseV1 if err := c.readResponse(size, &res); err != nil { return err @@ -716,6 +951,7 @@ func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err err Host: b.Host, Port: int(b.Port), ID: int(b.NodeID), + Rack: b.Rack, } } @@ -728,7 +964,7 @@ func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err err } for _, t := range res.Topics { - if t.TopicErrorCode != 0 && t.TopicName == c.topic { + if t.TopicErrorCode != 0 && (c.topic == "" || t.TopicName == c.topic) { // We only report errors if they happened for the topic of // the connection, otherwise the topic will simply have no // partitions in the result set. @@ -759,52 +995,103 @@ func (c *Conn) ReadPartitions(topics ...string) (partitions []Partition, err err // This method is exposed to satisfy the net.Conn interface but is less efficient // than the more general purpose WriteMessages method. func (c *Conn) Write(b []byte) (int, error) { - return c.WriteMessages(Message{Value: b}) + return c.WriteCompressedMessages(nil, Message{Value: b}) } // WriteMessages writes a batch of messages to the connection's topic and // partition, returning the number of bytes written. The write is an atomic // operation, it either fully succeeds or fails. func (c *Conn) WriteMessages(msgs ...Message) (int, error) { + return c.WriteCompressedMessages(nil, msgs...) +} + +// WriteCompressedMessages writes a batch of messages to the connection's topic +// and partition, returning the number of bytes written. The write is an atomic +// operation, it either fully succeeds or fails. 
+// +// If the compression codec is not nil, the messages will be compressed. +func (c *Conn) WriteCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, err error) { + nbytes, _, _, _, err = c.writeCompressedMessages(codec, msgs...) + return +} + +// WriteCompressedMessagesAt writes a batch of messages to the connection's topic +// and partition, returning the number of bytes written, partition and offset numbers +// and timestamp assigned by the kafka broker to the message set. The write is an atomic +// operation, it either fully succeeds or fails. +// +// If the compression codec is not nil, the messages will be compressed. +func (c *Conn) WriteCompressedMessagesAt(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + return c.writeCompressedMessages(codec, msgs...) +} + +func (c *Conn) writeCompressedMessages(codec CompressionCodec, msgs ...Message) (nbytes int, partition int32, offset int64, appendTime time.Time, err error) { + if len(msgs) == 0 { - return 0, nil + return } writeTime := time.Now() - for _, msg := range msgs { + for i, msg := range msgs { // users may believe they can set the Topic and/or Partition // on the kafka message. if msg.Topic != "" && msg.Topic != c.topic { - return 0, errInvalidWriteTopic + err = errInvalidWriteTopic + return } if msg.Partition != 0 { - return 0, errInvalidWritePartition + err = errInvalidWritePartition + return } if msg.Time.IsZero() { - msg.Time = writeTime + msgs[i].Time = writeTime } - } - n := 0 - for _, msg := range msgs { - n += len(msg.Key) + len(msg.Value) + nbytes += len(msg.Key) + len(msg.Value) } - err := c.writeOperation( + err = c.writeOperation( func(deadline time.Time, id int32) error { now := time.Now() deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) - return writeProduceRequestV2( - &c.wbuf, - id, - c.clientID, - c.topic, - c.partition, - deadlineToTimeout(deadline, now), - int16(atomic.LoadInt32(&c.requiredAcks)), - msgs..., - ) + switch version := c.apiVersions[produceRequest].MaxVersion; { + case version >= 7: + return c.wb.writeProduceRequestV7( + codec, + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + msgs..., + ) + case version >= 3: + return c.wb.writeProduceRequestV3( + codec, + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + c.transactionalID, + msgs..., + ) + default: + return c.wb.writeProduceRequestV2( + codec, + id, + c.clientID, + c.topic, + c.partition, + deadlineToTimeout(deadline, now), + int16(atomic.LoadInt32(&c.requiredAcks)), + msgs..., + ) + } }, func(deadline time.Time, size int) error { return expectZeroSize(readArrayWith(&c.rbuf, size, func(r *bufio.Reader, size int) (int, error) { @@ -818,12 +1105,33 @@ func (c *Conn) WriteMessages(msgs ...Message) (int, error) { // Read the list of partitions, there should be only one since // we've produced a message to a single partition. 
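// Hedged sketch for the WriteCompressedMessages / WriteCompressedMessagesAt
// variants above. A nil codec leaves the batch uncompressed; concrete codecs
// (gzip, snappy, ...) live in subpackages and are not shown here. Broker
// address and topic are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	conn, err := kafka.DialLeader(context.Background(), "tcp", "localhost:9092", "example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	nbytes, partition, offset, appendTime, err := conn.WriteCompressedMessagesAt(nil,
		kafka.Message{Key: []byte("k1"), Value: []byte("v1")},
		kafka.Message{Key: []byte("k2"), Value: []byte("v2")},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wrote %d bytes to partition %d starting at offset %d (%v)\n",
		nbytes, partition, offset, appendTime)
}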
size, err = readArrayWith(r, size, func(r *bufio.Reader, size int) (int, error) { - var p produceResponsePartitionV2 - size, err := p.readFrom(r, size) - if err == nil && p.ErrorCode != 0 { - err = Error(p.ErrorCode) + switch c.produceVersion { + case v7: + var p produceResponsePartitionV7 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err + default: + var p produceResponsePartitionV2 + size, err := p.readFrom(r, size) + if err == nil && p.ErrorCode != 0 { + err = Error(p.ErrorCode) + } + if err == nil { + partition = p.Partition + offset = p.Offset + appendTime = time.Unix(0, p.Timestamp*int64(time.Millisecond)) + } + return size, err } - return size, err + }) if err != nil { return size, err @@ -837,10 +1145,10 @@ func (c *Conn) WriteMessages(msgs ...Message) (int, error) { ) if err != nil { - n = 0 + nbytes = 0 } - return n, err + return } // SetRequiredAcks sets the number of acknowledges from replicas that the @@ -858,14 +1166,14 @@ func (c *Conn) SetRequiredAcks(n int) error { func (c *Conn) writeRequestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32, size int32) { hdr := c.requestHeader(apiKey, apiVersion, correlationID) hdr.Size = (hdr.size() + size) - 4 - hdr.writeTo(&c.wbuf) + hdr.writeTo(&c.wb) } func (c *Conn) writeRequest(apiKey apiKey, apiVersion apiVersion, correlationID int32, req request) error { hdr := c.requestHeader(apiKey, apiVersion, correlationID) hdr.Size = (hdr.size() + req.size()) - 4 - hdr.writeTo(&c.wbuf) - req.writeTo(&c.wbuf) + hdr.writeTo(&c.wb) + req.writeTo(&c.wb) return c.wbuf.Flush() } @@ -953,7 +1261,18 @@ func (c *Conn) doRequest(d *connDeadline, write func(time.Time, int32) error) (i } func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size int, lock *sync.Mutex, err error) { - for { + // I applied exactly zero scientific process to choose this value, + // it seemed to worked fine in practice tho. + // + // My guess is 100 iterations where the goroutine gets descheduled + // by calling runtime.Gosched() may end up on a wait of ~10ms to ~1s + // (if the programs is heavily CPU bound and has lots of goroutines), + // so it should allow for bailing quickly without taking too much risk + // to get false positives. + const maxAttempts = 100 + var lastID int32 + + for attempt := 0; attempt < maxAttempts; { var rsz int32 var rid int32 @@ -977,7 +1296,26 @@ func (c *Conn) waitResponse(d *connDeadline, id int32) (deadline time.Time, size // been received but the current operation is not the target for it. c.rlock.Unlock() runtime.Gosched() + + // This check is a safety mechanism, if we make too many loop + // iterations and always draw the same id then we could be facing + // corrupted data on the wire, or the goroutine(s) sharing ownership + // of this connection may have panicked and therefore will not be able + // to participate in consuming bytes from the wire. To prevent entering + // an infinite loop which reads the same value over and over we bail + // with the uncommon io.ErrNoProgress error which should give a good + // enough signal about what is going wrong. 
+ if rid != lastID { + attempt++ + } else { + attempt = 0 + } + + lastID = rid } + + err = io.ErrNoProgress + return } func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID int32) requestHeader { @@ -989,6 +1327,85 @@ func (c *Conn) requestHeader(apiKey apiKey, apiVersion apiVersion, correlationID } } +type ApiVersion struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +var defaultApiVersions map[apiKey]ApiVersion = map[apiKey]ApiVersion{ + produceRequest: ApiVersion{int16(produceRequest), int16(v2), int16(v2)}, + fetchRequest: ApiVersion{int16(fetchRequest), int16(v2), int16(v2)}, + listOffsetRequest: ApiVersion{int16(listOffsetRequest), int16(v1), int16(v1)}, + metadataRequest: ApiVersion{int16(metadataRequest), int16(v1), int16(v1)}, + offsetCommitRequest: ApiVersion{int16(offsetCommitRequest), int16(v2), int16(v2)}, + offsetFetchRequest: ApiVersion{int16(offsetFetchRequest), int16(v1), int16(v1)}, + groupCoordinatorRequest: ApiVersion{int16(groupCoordinatorRequest), int16(v0), int16(v0)}, + joinGroupRequest: ApiVersion{int16(joinGroupRequest), int16(v1), int16(v1)}, + heartbeatRequest: ApiVersion{int16(heartbeatRequest), int16(v0), int16(v0)}, + leaveGroupRequest: ApiVersion{int16(leaveGroupRequest), int16(v0), int16(v0)}, + syncGroupRequest: ApiVersion{int16(syncGroupRequest), int16(v0), int16(v0)}, + describeGroupsRequest: ApiVersion{int16(describeGroupsRequest), int16(v1), int16(v1)}, + listGroupsRequest: ApiVersion{int16(listGroupsRequest), int16(v1), int16(v1)}, + apiVersionsRequest: ApiVersion{int16(apiVersionsRequest), int16(v0), int16(v0)}, + createTopicsRequest: ApiVersion{int16(createTopicsRequest), int16(v0), int16(v0)}, + deleteTopicsRequest: ApiVersion{int16(deleteTopicsRequest), int16(v1), int16(v1)}, +} + +func (c *Conn) ApiVersions() ([]ApiVersion, error) { + id, err := c.doRequest(&c.rdeadline, func(deadline time.Time, id int32) error { + now := time.Now() + deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) + + h := requestHeader{ + ApiKey: int16(apiVersionsRequest), + ApiVersion: int16(v0), + CorrelationID: id, + ClientID: c.clientID, + } + h.Size = (h.size() - 4) + + h.writeTo(&c.wb) + return c.wbuf.Flush() + }) + if err != nil { + return nil, err + } + + _, size, lock, err := c.waitResponse(&c.rdeadline, id) + if err != nil { + return nil, err + } + defer lock.Unlock() + + var errorCode int16 + if size, err = readInt16(&c.rbuf, size, &errorCode); err != nil { + return nil, err + } + var arrSize int32 + if size, err = readInt32(&c.rbuf, size, &arrSize); err != nil { + return nil, err + } + r := make([]ApiVersion, arrSize) + for i := 0; i < int(arrSize); i++ { + if size, err = readInt16(&c.rbuf, size, &r[i].ApiKey); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MinVersion); err != nil { + return nil, err + } + if size, err = readInt16(&c.rbuf, size, &r[i].MaxVersion); err != nil { + return nil, err + } + } + + if errorCode != 0 { + return r, Error(errorCode) + } + + return r, nil +} + // connDeadline is a helper type to implement read/write deadline management on // the kafka connection. type connDeadline struct { @@ -1049,3 +1466,88 @@ func (d *connDeadline) unsetConnWriteDeadline() { d.wconn = nil d.mutex.Unlock() } + +// saslHandshake sends the SASL handshake message. This will determine whether +// the Mechanism is supported by the cluster. If it's not, this function will +// error out with UnsupportedSASLMechanism. 
+// +// If the mechanism is unsupported, the handshake request will reply with the +// list of the cluster's configured mechanisms, which could potentially be used +// to facilitate negotiation. At the moment, we are not negotiating the +// mechanism as we believe that brokers are usually known to the client, and +// therefore the client should already know which mechanisms are supported. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslHandshake +func (c *Conn) saslHandshake(mechanism string) error { + // The wire format for V0 and V1 is identical, but the version + // number will affect how the SASL authentication + // challenge/responses are sent + var resp saslHandshakeResponseV0 + version := v0 + if c.apiVersions[saslHandshakeRequest].MaxVersion >= 1 { + version = v1 + } + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslHandshakeRequest, version, id, &saslHandshakeRequestV0{Mechanism: mechanism}) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (int, error) { + return (&resp).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && resp.ErrorCode != 0 { + err = Error(resp.ErrorCode) + } + return err +} + +// saslAuthenticate sends the SASL authenticate message. This function must +// be immediately preceded by a successful saslHandshake. +// +// See http://kafka.apache.org/protocol.html#The_Messages_SaslAuthenticate +func (c *Conn) saslAuthenticate(data []byte) ([]byte, error) { + // if we sent a v1 handshake, then we must encapsulate the authentication + // request in a saslAuthenticateRequest. otherwise, we read and write raw + // bytes. + if c.apiVersions[saslHandshakeRequest].MaxVersion >= 1 { + var request = saslAuthenticateRequestV0{Data: data} + var response saslAuthenticateResponseV0 + + err := c.writeOperation( + func(deadline time.Time, id int32) error { + return c.writeRequest(saslAuthenticateRequest, v0, id, request) + }, + func(deadline time.Time, size int) error { + return expectZeroSize(func() (remain int, err error) { + return (&response).readFrom(&c.rbuf, size) + }()) + }, + ) + if err == nil && response.ErrorCode != 0 { + err = Error(response.ErrorCode) + } + return response.Data, err + } + + // fall back to opaque bytes on the wire. the broker is expecting these if + // it just processed a v0 sasl handshake. + c.wb.writeInt32(int32(len(data))) + if _, err := c.wb.Write(data); err != nil { + return nil, err + } + if err := c.wb.Flush(); err != nil { + return nil, err + } + + var respLen int32 + _, err := readInt32(&c.rbuf, 4, &respLen) + if err != nil { + return nil, err + } + + resp, _, err := readNewBytes(&c.rbuf, int(respLen), int(respLen)) + return resp, err +} diff --git a/vendor/github.com/segmentio/kafka-go/consumergroup.go b/vendor/github.com/segmentio/kafka-go/consumergroup.go new file mode 100644 index 000000000..8975d24d2 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/consumergroup.go @@ -0,0 +1,1092 @@ +package kafka + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "math" + "strings" + "sync" + "time" +) + +// ErrGroupClosed is returned by ConsumerGroup.Next when the group has already +// been closed. +var ErrGroupClosed = errors.New("consumer group is closed") + +// ErrGenerationEnded is returned by the context.Context issued by the +// Generation's Start function when the context has been closed. 
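// The saslHandshake/saslAuthenticate helpers above are unexported and are
// driven by the Dialer. A hedged sketch, assuming this version of kafka-go
// exposes a SASLMechanism field on Dialer and a PLAIN mechanism in the
// sasl/plain subpackage (as upstream does); address and credentials are
// placeholders.
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/plain"
)

func main() {
	dialer := &kafka.Dialer{
		SASLMechanism: plain.Mechanism{
			Username: "user",     // placeholder credentials
			Password: "password",
		},
	}

	// Dialing with a SASL mechanism performs the handshake (v0 or v1,
	// depending on the broker's advertised versions) followed by the
	// authenticate exchange before the connection is handed back.
	conn, err := dialer.DialLeader(context.Background(), "tcp", "localhost:9093", "example-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}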
+var ErrGenerationEnded = errors.New("consumer group generation has ended") + +const ( + // defaultProtocolType holds the default protocol type documented in the + // kafka protocol + // + // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI + defaultProtocolType = "consumer" + + // defaultHeartbeatInterval contains the default time between heartbeats. If + // the coordinator does not receive a heartbeat within the session timeout interval, + // the consumer will be considered dead and the coordinator will rebalance the + // group. + // + // As a rule, the heartbeat interval should be no greater than 1/3 the session timeout + defaultHeartbeatInterval = 3 * time.Second + + // defaultSessionTimeout contains the default interval the coordinator will wait + // for a heartbeat before marking a consumer as dead + defaultSessionTimeout = 30 * time.Second + + // defaultRebalanceTimeout contains the amount of time the coordinator will wait + // for consumers to issue a join group once a rebalance has been requested + defaultRebalanceTimeout = 30 * time.Second + + // defaultJoinGroupBackoff is the amount of time to wait after a failed + // consumer group generation before attempting to re-join. + defaultJoinGroupBackoff = 5 * time.Second + + // defaultRetentionTime holds the length of time a the consumer group will be + // saved by kafka + defaultRetentionTime = time.Hour * 24 + + // defaultPartitionWatchTime contains the amount of time the kafka-go will wait to + // query the brokers looking for partition changes. + defaultPartitionWatchTime = 5 * time.Second +) + +// ConsumerGroupConfig is a configuration object used to create new instances of +// ConsumerGroup. +type ConsumerGroupConfig struct { + // ID is the consumer group ID. It must not be empty. + ID string + + // The list of broker addresses used to connect to the kafka cluster. It + // must not be empty. + Brokers []string + + // An dialer used to open connections to the kafka server. This field is + // optional, if nil, the default dialer is used instead. + Dialer *Dialer + + // Topics is the list of topics that will be consumed by this group. It + // will usually have a single value, but it is permitted to have multiple + // for more complex use cases. + Topics []string + + // GroupBalancers is the priority-ordered list of client-side consumer group + // balancing strategies that will be offered to the coordinator. The first + // strategy that all group members support will be chosen by the leader. + // + // Default: [Range, RoundRobin] + GroupBalancers []GroupBalancer + + // HeartbeatInterval sets the optional frequency at which the reader sends the consumer + // group heartbeat update. + // + // Default: 3s + HeartbeatInterval time.Duration + + // PartitionWatchInterval indicates how often a reader checks for partition changes. + // If a reader sees a partition change (such as a partition add) it will rebalance the group + // picking up new partitions. + // + // Default: 5s + PartitionWatchInterval time.Duration + + // WatchForPartitionChanges is used to inform kafka-go that a consumer group should be + // polling the brokers and rebalancing if any partition changes happen to the topic. + WatchPartitionChanges bool + + // SessionTimeout optionally sets the length of time that may pass without a heartbeat + // before the coordinator considers the consumer dead and initiates a rebalance. 
+ // + // Default: 30s + SessionTimeout time.Duration + + // RebalanceTimeout optionally sets the length of time the coordinator will wait + // for members to join as part of a rebalance. For kafka servers under higher + // load, it may be useful to set this value higher. + // + // Default: 30s + RebalanceTimeout time.Duration + + // JoinGroupBackoff optionally sets the length of time to wait before re-joining + // the consumer group after an error. + // + // Default: 5s + JoinGroupBackoff time.Duration + + // RetentionTime optionally sets the length of time the consumer group will be saved + // by the broker + // + // Default: 24h + RetentionTime time.Duration + + // StartOffset determines from whence the consumer group should begin + // consuming when it finds a partition without a committed offset. If + // non-zero, it must be set to one of FirstOffset or LastOffset. + // + // Default: FirstOffset + StartOffset int64 + + // If not nil, specifies a logger used to report internal changes within the + // reader. + Logger *log.Logger + + // ErrorLogger is the logger used to report errors. If nil, the reader falls + // back to using Logger instead. + ErrorLogger *log.Logger + + // connect is a function for dialing the coordinator. This is provided for + // unit testing to mock broker connections. + connect func(dialer *Dialer, brokers ...string) (coordinator, error) +} + +// Validate method validates ConsumerGroupConfig properties and sets relevant +// defaults. +func (config *ConsumerGroupConfig) Validate() error { + + if len(config.Brokers) == 0 { + return errors.New("cannot create a consumer group with an empty list of broker addresses") + } + + if len(config.Topics) == 0 { + return errors.New("cannot create a consumer group without a topic") + } + + if config.ID == "" { + return errors.New("cannot create a consumer group without an ID") + } + + if config.Dialer == nil { + config.Dialer = DefaultDialer + } + + if len(config.GroupBalancers) == 0 { + config.GroupBalancers = []GroupBalancer{ + RangeGroupBalancer{}, + RoundRobinGroupBalancer{}, + } + } + + if config.HeartbeatInterval == 0 { + config.HeartbeatInterval = defaultHeartbeatInterval + } + + if config.SessionTimeout == 0 { + config.SessionTimeout = defaultSessionTimeout + } + + if config.PartitionWatchInterval == 0 { + config.PartitionWatchInterval = defaultPartitionWatchTime + } + + if config.RebalanceTimeout == 0 { + config.RebalanceTimeout = defaultRebalanceTimeout + } + + if config.JoinGroupBackoff == 0 { + config.JoinGroupBackoff = defaultJoinGroupBackoff + } + + if config.RetentionTime == 0 { + config.RetentionTime = defaultRetentionTime + } + + if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 { + return errors.New(fmt.Sprintf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval)) + } + + if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 { + return errors.New(fmt.Sprintf("SessionTimeout out of bounds: %d", config.SessionTimeout)) + } + + if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 { + return errors.New(fmt.Sprintf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout)) + } + + if config.JoinGroupBackoff < 0 || (config.JoinGroupBackoff/time.Millisecond) >= math.MaxInt32 { + return errors.New(fmt.Sprintf("JoinGroupBackoff out of bounds: %d", config.JoinGroupBackoff)) + } + + if config.RetentionTime < 0 { + return errors.New(fmt.Sprintf("RetentionTime out of bounds: %d", 
config.RetentionTime)) + } + + if config.PartitionWatchInterval < 0 || (config.PartitionWatchInterval/time.Millisecond) >= math.MaxInt32 { + return errors.New(fmt.Sprintf("PartitionWachInterval out of bounds %d", config.PartitionWatchInterval)) + } + + if config.StartOffset == 0 { + config.StartOffset = FirstOffset + } + + if config.StartOffset != FirstOffset && config.StartOffset != LastOffset { + return errors.New(fmt.Sprintf("StartOffset is not valid %d", config.StartOffset)) + } + + if config.connect == nil { + config.connect = connect + } + + return nil +} + +// PartitionAssignment represents the starting state of a partition that has +// been assigned to a consumer. +type PartitionAssignment struct { + // ID is the partition ID. + ID int + + // Offset is the initial offset at which this assignment begins. It will + // either be an absolute offset if one has previously been committed for + // the consumer group or a relative offset such as FirstOffset when this + // is the first time the partition have been assigned to a member of the + // group. + Offset int64 +} + +// genCtx adapts the done channel of the generation to a context.Context. This +// is used by Generation.Start so that we can pass a context to go routines +// instead of passing around channels. +type genCtx struct { + gen *Generation +} + +func (c genCtx) Done() <-chan struct{} { + return c.gen.done +} + +func (c genCtx) Err() error { + select { + case <-c.gen.done: + return ErrGenerationEnded + default: + return nil + } +} + +func (c genCtx) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +func (c genCtx) Value(interface{}) interface{} { + return nil +} + +// Generation represents a single consumer group generation. The generation +// carries the topic+partition assignments for the given. It also provides +// facilities for committing offsets and for running functions whose lifecycles +// are bound to the generation. +type Generation struct { + // ID is the generation ID as assigned by the consumer group coordinator. + ID int32 + + // GroupID is the name of the consumer group. + GroupID string + + // MemberID is the ID assigned to this consumer by the consumer group + // coordinator. + MemberID string + + // Assignments is the initial state of this Generation. The partition + // assignments are grouped by topic. + Assignments map[string][]PartitionAssignment + + conn coordinator + + once sync.Once + done chan struct{} + wg sync.WaitGroup + + retentionMillis int64 + log func(func(*log.Logger)) + logError func(func(*log.Logger)) +} + +// close stops the generation and waits for all functions launched via Start to +// terminate. +func (g *Generation) close() { + g.once.Do(func() { + close(g.done) + }) + g.wg.Wait() +} + +// Start launches the provided function in a go routine and adds accounting such +// that when the function exits, it stops the current generation (if not +// already in the process of doing so). +// +// The provided function MUST support cancellation via the ctx argument and exit +// in a timely manner once the ctx is complete. When the context is closed, the +// context's Error() function will return ErrGenerationEnded. +// +// When closing out a generation, the consumer group will wait for all functions +// launched by Start to exit before the group can move on and join the next +// generation. If the function does not exit promptly, it will stop forward +// progress for this consumer and potentially cause consumer group membership +// churn. 
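// Hedged sketch of how ConsumerGroupConfig and Validate (above) fit together:
// ID, Brokers and Topics are mandatory, everything else is defaulted by
// Validate (3s heartbeats, 30s session/rebalance timeouts, 5s join backoff,
// 24h retention, FirstOffset). Group ID, topic and address are placeholders.
package main

import (
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	cfg := kafka.ConsumerGroupConfig{
		ID:      "example-group",
		Brokers: []string{"localhost:9092"},
		Topics:  []string{"example-topic"},
		// GroupBalancers defaults to [Range, RoundRobin]; the leader picks
		// the first strategy supported by every member.
	}

	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(cfg.HeartbeatInterval)                // 3s
	fmt.Println(cfg.SessionTimeout)                   // 30s
	fmt.Println(cfg.StartOffset == kafka.FirstOffset) // true
}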
+func (g *Generation) Start(fn func(ctx context.Context)) { + g.wg.Add(1) + go func() { + fn(genCtx{g}) + // shut down the generation as soon as one function exits. this is + // different from close() in that it doesn't wait on the wg. + g.once.Do(func() { + close(g.done) + }) + g.wg.Done() + }() +} + +// CommitOffsets commits the provided topic+partition+offset combos to the +// consumer group coordinator. This can be used to reset the consumer to +// explicit offsets. +func (g *Generation) CommitOffsets(offsets map[string]map[int]int64) error { + if len(offsets) == 0 { + return nil + } + + topics := make([]offsetCommitRequestV2Topic, 0, len(offsets)) + for topic, partitions := range offsets { + t := offsetCommitRequestV2Topic{Topic: topic} + for partition, offset := range partitions { + t.Partitions = append(t.Partitions, offsetCommitRequestV2Partition{ + Partition: int32(partition), + Offset: offset, + }) + } + topics = append(topics, t) + } + + request := offsetCommitRequestV2{ + GroupID: g.GroupID, + GenerationID: g.ID, + MemberID: g.MemberID, + RetentionTime: g.retentionMillis, + Topics: topics, + } + + _, err := g.conn.offsetCommit(request) + if err == nil { + // if logging is enabled, print out the partitions that were committed. + g.log(func(l *log.Logger) { + var report []string + for _, t := range request.Topics { + report = append(report, fmt.Sprintf("\ttopic: %s", t.Topic)) + for _, p := range t.Partitions { + report = append(report, fmt.Sprintf("\t\tpartition %d: %d", p.Partition, p.Offset)) + } + } + l.Printf("committed offsets for group %s: \n%s", g.GroupID, strings.Join(report, "\n")) + }) + } + + return err +} + +// heartbeatLoop checks in with the consumer group coordinator at the provided +// interval. It exits if it ever encounters an error, which would signal the +// end of the generation. +func (g *Generation) heartbeatLoop(interval time.Duration) { + g.Start(func(ctx context.Context) { + g.log(func(l *log.Logger) { + l.Printf("started heartbeat for group, %v [%v]", g.GroupID, interval) + }) + defer g.log(func(l *log.Logger) { + l.Println("stopped heartbeat for group,", g.GroupID) + }) + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + _, err := g.conn.heartbeat(heartbeatRequestV0{ + GroupID: g.GroupID, + GenerationID: g.ID, + MemberID: g.MemberID, + }) + if err != nil { + return + } + } + } + }) +} + +// partitionWatcher queries kafka and watches for partition changes, triggering +// a rebalance if changes are found. Similar to heartbeat it's okay to return on +// error here as if you are unable to ask a broker for basic metadata you're in +// a bad spot and should rebalance. Commonly you will see an error here if there +// is a problem with the connection to the coordinator and a rebalance will +// establish a new connection to the coordinator. 
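// Hedged sketch of the Start / CommitOffsets contract described above:
// functions launched through Start must return once ctx is done (ctx.Err()
// then reports ErrGenerationEnded), and offsets are committed as a
// topic -> partition -> offset map. Topic name and offsets are placeholders;
// gen would come from ConsumerGroup.Next.
package example

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func consumeGeneration(gen *kafka.Generation) {
	for topic, assignments := range gen.Assignments {
		topic := topic // per-iteration copy for the goroutines below
		for _, a := range assignments {
			partition, offset := a.ID, a.Offset
			gen.Start(func(ctx context.Context) {
				// A real consumer would read partition from offset here and
				// exit promptly once ctx is done.
				log.Printf("consuming %s/%d from offset %d", topic, partition, offset)
				<-ctx.Done()
			})
		}
	}

	// Commit the offsets processed so far for this generation.
	if err := gen.CommitOffsets(map[string]map[int]int64{
		"example-topic": {0: 42}, // partition 0 processed through offset 42
	}); err != nil {
		log.Printf("commit failed: %v", err)
	}
}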
+func (g *Generation) partitionWatcher(interval time.Duration, topic string) { + g.Start(func(ctx context.Context) { + g.log(func(l *log.Logger) { + l.Printf("started partition watcher for group, %v, topic %v [%v]", g.GroupID, topic, interval) + }) + defer g.log(func(l *log.Logger) { + l.Printf("stopped partition watcher for group, %v, topic %v", g.GroupID, topic) + }) + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + ops, err := g.conn.ReadPartitions(topic) + if err != nil { + g.logError(func(l *log.Logger) { + l.Printf("Problem getting partitions during startup, %v\n, Returning and setting up nextGeneration", err) + }) + return + } + oParts := len(ops) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ops, err := g.conn.ReadPartitions(topic) + switch err { + case nil, UnknownTopicOrPartition: + if len(ops) != oParts { + g.log(func(l *log.Logger) { + l.Printf("Partition changes found, reblancing group: %v.", g.GroupID) + }) + return + } + default: + g.logError(func(l *log.Logger) { + l.Printf("Problem getting partitions while checking for changes, %v", err) + }) + if _, ok := err.(Error); ok { + continue + } + // other errors imply that we lost the connection to the coordinator, so we + // should abort and reconnect. + return + } + } + } + }) +} + +var _ coordinator = &Conn{} + +// coordinator is a subset of the functionality in Conn in order to facilitate +// testing the consumer group...especially for error conditions that are +// difficult to instigate with a live broker running in docker. +type coordinator interface { + io.Closer + findCoordinator(findCoordinatorRequestV0) (findCoordinatorResponseV0, error) + joinGroup(joinGroupRequestV1) (joinGroupResponseV1, error) + syncGroup(syncGroupRequestV0) (syncGroupResponseV0, error) + leaveGroup(leaveGroupRequestV0) (leaveGroupResponseV0, error) + heartbeat(heartbeatRequestV0) (heartbeatResponseV0, error) + offsetFetch(offsetFetchRequestV1) (offsetFetchResponseV1, error) + offsetCommit(offsetCommitRequestV2) (offsetCommitResponseV2, error) + ReadPartitions(...string) ([]Partition, error) +} + +// NewConsumerGroup creates a new ConsumerGroup. It returns an error if the +// provided configuration is invalid. It does not attempt to connect to the +// Kafka cluster. That happens asynchronously, and any errors will be reported +// by Next. +func NewConsumerGroup(config ConsumerGroupConfig) (*ConsumerGroup, error) { + if err := config.Validate(); err != nil { + return nil, err + } + + cg := &ConsumerGroup{ + config: config, + next: make(chan *Generation), + errs: make(chan error), + done: make(chan struct{}), + } + cg.wg.Add(1) + go func() { + cg.run() + cg.wg.Done() + }() + return cg, nil +} + +// ConsumerGroup models a Kafka consumer group. A caller doesn't interact with +// the group directly. Rather, they interact with a Generation. Every time a +// member enters or exits the group, it results in a new Generation. The +// Generation is where partition assignments and offset management occur. +// Callers will use Next to get a handle to the Generation. +type ConsumerGroup struct { + config ConsumerGroupConfig + next chan *Generation + errs chan error + + closeOnce sync.Once + wg sync.WaitGroup + done chan struct{} +} + +// Close terminates the current generation by causing this member to leave and +// releases all local resources used to participate in the consumer group. +// Close will also end the current generation if it is still active. 
+func (cg *ConsumerGroup) Close() error { + cg.closeOnce.Do(func() { + close(cg.done) + }) + cg.wg.Wait() + return nil +} + +// Next waits for the next consumer group generation. There will never be two +// active generations. Next will never return a new generation until the +// previous one has completed. +// +// If there are errors setting up the next generation, they will be surfaced +// here. +// +// If the ConsumerGroup has been closed, then Next will return ErrGroupClosed. +func (cg *ConsumerGroup) Next(ctx context.Context) (*Generation, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-cg.done: + return nil, ErrGroupClosed + case err := <-cg.errs: + return nil, err + case next := <-cg.next: + return next, nil + } +} + +func (cg *ConsumerGroup) run() { + // the memberID is the only piece of information that is maintained across + // generations. it starts empty and will be assigned on the first nextGeneration + // when the joinGroup request is processed. it may change again later if + // the CG coordinator fails over or if the member is evicted. otherwise, it + // will be constant for the lifetime of this group. + var memberID string + var err error + for { + memberID, err = cg.nextGeneration(memberID) + + // backoff will be set if this go routine should sleep before continuing + // to the next generation. it will be non-nil in the case of an error + // joining or syncing the group. + var backoff <-chan time.Time + switch err { + case nil: + // no error...the previous generation finished normally. + continue + case ErrGroupClosed: + // the CG has been closed...leave the group and exit loop. + _ = cg.leaveGroup(memberID) + return + case RebalanceInProgress: + // in case of a RebalanceInProgress, don't leave the group or + // change the member ID, but report the error. the next attempt + // to join the group will then be subject to the rebalance + // timeout, so the broker will be responsible for throttling + // this loop. + default: + // leave the group and report the error if we had gotten far + // enough so as to have a member ID. also clear the member id + // so we don't attempt to use it again. in order to avoid + // a tight error loop, backoff before the next attempt to join + // the group. + _ = cg.leaveGroup(memberID) + memberID = "" + backoff = time.After(cg.config.JoinGroupBackoff) + } + // ensure that we exit cleanly in case the CG is done and no one is + // waiting to receive on the unbuffered error channel. + select { + case <-cg.done: + return + case cg.errs <- err: + } + // backoff if needed, being sure to exit cleanly if the CG is done. + if backoff != nil { + select { + case <-cg.done: + // exit cleanly if the group is closed. + return + case <-backoff: + } + } + } +} + +func (cg *ConsumerGroup) nextGeneration(memberID string) (string, error) { + // get a new connection to the coordinator on each loop. the previous + // generation could have exited due to losing the connection, so this + // ensures that we always have a clean starting point. it means we will + // re-connect in certain cases, but that shouldn't be an issue given that + // rebalances are relatively infrequent under normal operating + // conditions. 
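// Hedged end-to-end sketch of the Next loop described above: Next blocks
// until a new generation is available, errors from joining/syncing the group
// surface here, and ErrGroupClosed ends the loop. Group ID, topic and address
// are placeholders; consumeGeneration stands in for application logic.
package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	group, err := kafka.NewConsumerGroup(kafka.ConsumerGroupConfig{
		ID:      "example-group",
		Brokers: []string{"localhost:9092"},
		Topics:  []string{"example-topic"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	for {
		gen, err := group.Next(context.Background())
		if err == kafka.ErrGroupClosed {
			return
		}
		if err != nil {
			log.Printf("failed to get next generation: %v", err)
			continue
		}
		consumeGeneration(gen) // application-specific consumption for this generation
	}
}

func consumeGeneration(gen *kafka.Generation) {
	for topic, assignments := range gen.Assignments {
		log.Printf("generation %d: %s -> %d partitions", gen.ID, topic, len(assignments))
	}
}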
+ conn, err := cg.coordinator() + if err != nil { + cg.withErrorLogger(func(log *log.Logger) { + log.Printf("Unable to establish connection to consumer group coordinator for group %s: %v", cg.config.ID, err) + }) + return memberID, err // a prior memberID may still be valid, so don't return "" + } + defer conn.Close() + + var generationID int32 + var groupAssignments GroupMemberAssignments + var assignments map[string][]int32 + + // join group. this will join the group and prepare assignments if our + // consumer is elected leader. it may also change or assign the member ID. + memberID, generationID, groupAssignments, err = cg.joinGroup(conn, memberID) + if err != nil { + cg.withErrorLogger(func(log *log.Logger) { + log.Printf("Failed to join group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + cg.withLogger(func(log *log.Logger) { + log.Printf("Joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID) + }) + + // sync group + assignments, err = cg.syncGroup(conn, memberID, generationID, groupAssignments) + if err != nil { + cg.withErrorLogger(func(log *log.Logger) { + log.Printf("Failed to sync group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + + // fetch initial offsets. + var offsets map[string]map[int]int64 + offsets, err = cg.fetchOffsets(conn, assignments) + if err != nil { + cg.withErrorLogger(func(log *log.Logger) { + log.Printf("Failed to fetch offsets for group %s: %v", cg.config.ID, err) + }) + return memberID, err + } + + // create the generation. + gen := Generation{ + ID: generationID, + GroupID: cg.config.ID, + MemberID: memberID, + Assignments: cg.makeAssignments(assignments, offsets), + conn: conn, + done: make(chan struct{}), + retentionMillis: int64(cg.config.RetentionTime / time.Millisecond), + log: cg.withLogger, + logError: cg.withErrorLogger, + } + + // spawn all of the go routines required to facilitate this generation. if + // any of these functions exit, then the generation is determined to be + // complete. + gen.heartbeatLoop(cg.config.HeartbeatInterval) + if cg.config.WatchPartitionChanges { + for _, topic := range cg.config.Topics { + gen.partitionWatcher(cg.config.PartitionWatchInterval, topic) + } + } + + // make this generation available for retrieval. if the CG is closed before + // we can send it on the channel, exit. that case is required b/c the next + // channel is unbuffered. if the caller to Next has already bailed because + // it's own teardown logic has been invoked, this would deadlock otherwise. + select { + case <-cg.done: + gen.close() + return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic. + case cg.next <- &gen: + } + + // wait for generation to complete. if the CG is closed before the + // generation is finished, exit and leave the group. + select { + case <-cg.done: + gen.close() + return memberID, ErrGroupClosed // ErrGroupClosed will trigger leave logic. + case <-gen.done: + // time for next generation! make sure all the current go routines exit + // before continuing onward. + gen.close() + return memberID, nil + } +} + +// connect returns a connection to ANY broker +func connect(dialer *Dialer, brokers ...string) (conn coordinator, err error) { + for _, broker := range brokers { + if conn, err = dialer.Dial("tcp", broker); err == nil { + return + } + } + return // err will be non-nil +} + +// coordinator establishes a connection to the coordinator for this consumer +// group. 
+func (cg *ConsumerGroup) coordinator() (coordinator, error) { + // NOTE : could try to cache the coordinator to avoid the double connect + // here. since consumer group balances happen infrequently and are + // an expensive operation, we're not currently optimizing that case + // in order to keep the code simpler. + conn, err := cg.config.connect(cg.config.Dialer, cg.config.Brokers...) + if err != nil { + return nil, err + } + defer conn.Close() + + out, err := conn.findCoordinator(findCoordinatorRequestV0{ + CoordinatorKey: cg.config.ID, + }) + if err == nil && out.ErrorCode != 0 { + err = Error(out.ErrorCode) + } + if err != nil { + return nil, err + } + + address := fmt.Sprintf("%v:%v", out.Coordinator.Host, out.Coordinator.Port) + return cg.config.connect(cg.config.Dialer, address) +} + +// joinGroup attempts to join the reader to the consumer group. +// Returns GroupMemberAssignments is this Reader was selected as +// the leader. Otherwise, GroupMemberAssignments will be nil. +// +// Possible kafka error codes returned: +// * GroupLoadInProgress: +// * GroupCoordinatorNotAvailable: +// * NotCoordinatorForGroup: +// * InconsistentGroupProtocol: +// * InvalidSessionTimeout: +// * GroupAuthorizationFailed: +func (cg *ConsumerGroup) joinGroup(conn coordinator, memberID string) (string, int32, GroupMemberAssignments, error) { + request, err := cg.makeJoinGroupRequestV1(memberID) + if err != nil { + return "", 0, nil, err + } + + response, err := conn.joinGroup(request) + if err == nil && response.ErrorCode != 0 { + err = Error(response.ErrorCode) + } + if err != nil { + return "", 0, nil, err + } + + memberID = response.MemberID + generationID := response.GenerationID + + cg.withLogger(func(l *log.Logger) { + l.Printf("joined group %s as member %s in generation %d", cg.config.ID, memberID, generationID) + }) + + var assignments GroupMemberAssignments + if iAmLeader := response.MemberID == response.LeaderID; iAmLeader { + v, err := cg.assignTopicPartitions(conn, response) + if err != nil { + return memberID, 0, nil, err + } + assignments = v + + cg.withLogger(func(l *log.Logger) { + for memberID, assignment := range assignments { + for topic, partitions := range assignment { + l.Printf("assigned member/topic/partitions %v/%v/%v", memberID, topic, partitions) + } + } + }) + } + + cg.withLogger(func(l *log.Logger) { + l.Printf("joinGroup succeeded for response, %v. 
generationID=%v, memberID=%v", cg.config.ID, response.GenerationID, response.MemberID) + }) + + return memberID, generationID, assignments, nil +} + +// makeJoinGroupRequestV1 handles the logic of constructing a joinGroup +// request +func (cg *ConsumerGroup) makeJoinGroupRequestV1(memberID string) (joinGroupRequestV1, error) { + request := joinGroupRequestV1{ + GroupID: cg.config.ID, + MemberID: memberID, + SessionTimeout: int32(cg.config.SessionTimeout / time.Millisecond), + RebalanceTimeout: int32(cg.config.RebalanceTimeout / time.Millisecond), + ProtocolType: defaultProtocolType, + } + + for _, balancer := range cg.config.GroupBalancers { + userData, err := balancer.UserData() + if err != nil { + return joinGroupRequestV1{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %v", balancer.ProtocolName(), err) + } + request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV1{ + ProtocolName: balancer.ProtocolName(), + ProtocolMetadata: groupMetadata{ + Version: 1, + Topics: cg.config.Topics, + UserData: userData, + }.bytes(), + }) + } + + return request, nil +} + +// assignTopicPartitions uses the selected GroupBalancer to assign members to +// their various partitions +func (cg *ConsumerGroup) assignTopicPartitions(conn coordinator, group joinGroupResponseV1) (GroupMemberAssignments, error) { + cg.withLogger(func(l *log.Logger) { + l.Println("selected as leader for group,", cg.config.ID) + }) + + balancer, ok := findGroupBalancer(group.GroupProtocol, cg.config.GroupBalancers) + if !ok { + // NOTE : this shouldn't happen in practice...the broker should not + // return successfully from joinGroup unless all members support + // at least one common protocol. + return nil, fmt.Errorf("unable to find selected balancer, %v, for group, %v", group.GroupProtocol, cg.config.ID) + } + + members, err := cg.makeMemberProtocolMetadata(group.Members) + if err != nil { + return nil, err + } + + topics := extractTopics(members) + partitions, err := conn.ReadPartitions(topics...) + + // it's not a failure if the topic doesn't exist yet. it results in no + // assignments for the topic. this matches the behavior of the official + // clients: java, python, and librdkafka. + // a topic watcher can trigger a rebalance when the topic comes into being. 
+ if err != nil && err != UnknownTopicOrPartition { + return nil, err + } + + cg.withLogger(func(l *log.Logger) { + l.Printf("using '%v' balancer to assign group, %v", group.GroupProtocol, cg.config.ID) + for _, member := range members { + l.Printf("found member: %v/%#v", member.ID, member.UserData) + } + for _, partition := range partitions { + l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID) + } + }) + + return balancer.AssignGroups(members, partitions), nil +} + +// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into []GroupMember +func (cg *ConsumerGroup) makeMemberProtocolMetadata(in []joinGroupResponseMemberV1) ([]GroupMember, error) { + members := make([]GroupMember, 0, len(in)) + for _, item := range in { + metadata := groupMetadata{} + reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata)) + if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 { + return nil, fmt.Errorf("unable to read metadata for member, %v: %v", item.MemberID, err) + } + + members = append(members, GroupMember{ + ID: item.MemberID, + Topics: metadata.Topics, + UserData: metadata.UserData, + }) + } + return members, nil +} + +// syncGroup completes the consumer group nextGeneration by accepting the +// memberAssignments (if this Reader is the leader) and returning this +// Readers subscriptions topic => partitions +// +// Possible kafka error codes returned: +// * GroupCoordinatorNotAvailable: +// * NotCoordinatorForGroup: +// * IllegalGeneration: +// * RebalanceInProgress: +// * GroupAuthorizationFailed: +func (cg *ConsumerGroup) syncGroup(conn coordinator, memberID string, generationID int32, memberAssignments GroupMemberAssignments) (map[string][]int32, error) { + request := cg.makeSyncGroupRequestV0(memberID, generationID, memberAssignments) + response, err := conn.syncGroup(request) + if err == nil && response.ErrorCode != 0 { + err = Error(response.ErrorCode) + } + if err != nil { + return nil, err + } + + assignments := groupAssignment{} + reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments)) + if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil { + return nil, err + } + + if len(assignments.Topics) == 0 { + cg.withLogger(func(l *log.Logger) { + l.Printf("received empty assignments for group, %v as member %s for generation %d", cg.config.ID, memberID, generationID) + }) + } + + cg.withLogger(func(l *log.Logger) { + l.Printf("sync group finished for group, %v", cg.config.ID) + }) + + return assignments.Topics, nil +} + +func (cg *ConsumerGroup) makeSyncGroupRequestV0(memberID string, generationID int32, memberAssignments GroupMemberAssignments) syncGroupRequestV0 { + request := syncGroupRequestV0{ + GroupID: cg.config.ID, + GenerationID: generationID, + MemberID: memberID, + } + + if memberAssignments != nil { + request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV0, 0, 1) + + for memberID, topics := range memberAssignments { + topics32 := make(map[string][]int32) + for topic, partitions := range topics { + partitions32 := make([]int32, len(partitions)) + for i := range partitions { + partitions32[i] = int32(partitions[i]) + } + topics32[topic] = partitions32 + } + request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV0{ + MemberID: memberID, + MemberAssignments: groupAssignment{ + Version: 1, + Topics: topics32, + }.bytes(), + }) + } + + cg.withErrorLogger(func(logger *log.Logger) { + 
logger.Printf("Syncing %d assignments for generation %d as member %s", len(request.GroupAssignments), generationID, memberID) + }) + } + + return request +} + +func (cg *ConsumerGroup) fetchOffsets(conn coordinator, subs map[string][]int32) (map[string]map[int]int64, error) { + req := offsetFetchRequestV1{ + GroupID: cg.config.ID, + Topics: make([]offsetFetchRequestV1Topic, 0, len(cg.config.Topics)), + } + for _, topic := range cg.config.Topics { + req.Topics = append(req.Topics, offsetFetchRequestV1Topic{ + Topic: topic, + Partitions: subs[topic], + }) + } + offsets, err := conn.offsetFetch(req) + if err != nil { + return nil, err + } + + offsetsByTopic := make(map[string]map[int]int64) + for _, res := range offsets.Responses { + offsetsByPartition := map[int]int64{} + offsetsByTopic[res.Topic] = offsetsByPartition + for _, pr := range res.PartitionResponses { + for _, partition := range subs[res.Topic] { + if partition == pr.Partition { + offset := pr.Offset + if offset < 0 { + offset = cg.config.StartOffset + } + offsetsByPartition[int(partition)] = offset + } + } + } + } + + return offsetsByTopic, nil +} + +func (cg *ConsumerGroup) makeAssignments(assignments map[string][]int32, offsets map[string]map[int]int64) map[string][]PartitionAssignment { + topicAssignments := make(map[string][]PartitionAssignment) + for _, topic := range cg.config.Topics { + topicPartitions := assignments[topic] + topicAssignments[topic] = make([]PartitionAssignment, 0, len(topicPartitions)) + for _, partition := range topicPartitions { + var offset int64 + partitionOffsets, ok := offsets[topic] + if ok { + offset, ok = partitionOffsets[int(partition)] + } + if !ok { + offset = cg.config.StartOffset + } + topicAssignments[topic] = append(topicAssignments[topic], PartitionAssignment{ + ID: int(partition), + Offset: offset, + }) + } + } + return topicAssignments +} + +func (cg *ConsumerGroup) leaveGroup(memberID string) error { + // don't attempt to leave the group if no memberID was ever assigned. + if memberID == "" { + return nil + } + + cg.withLogger(func(log *log.Logger) { + log.Printf("Leaving group %s, member %s", cg.config.ID, memberID) + }) + + // IMPORTANT : leaveGroup establishes its own connection to the coordinator + // because it is often called after some other operation failed. + // said failure could be the result of connection-level issues, + // so we want to re-establish the connection to ensure that we + // are able to process the cleanup step. 
+ coordinator, err := cg.coordinator() + if err != nil { + return err + } + + _, err = coordinator.leaveGroup(leaveGroupRequestV0{ + GroupID: cg.config.ID, + MemberID: memberID, + }) + if err != nil { + cg.withErrorLogger(func(log *log.Logger) { + log.Printf("leave group failed for group, %v, and member, %v: %v", cg.config.ID, memberID, err) + }) + } + + _ = coordinator.Close() + + return err +} + +func (cg *ConsumerGroup) withLogger(do func(*log.Logger)) { + if cg.config.Logger != nil { + do(cg.config.Logger) + } +} + +func (cg *ConsumerGroup) withErrorLogger(do func(*log.Logger)) { + if cg.config.ErrorLogger != nil { + do(cg.config.ErrorLogger) + } else { + cg.withLogger(do) + } +} diff --git a/vendor/github.com/segmentio/kafka-go/crc32.go b/vendor/github.com/segmentio/kafka-go/crc32.go index f1a617f02..fef683428 100644 --- a/vendor/github.com/segmentio/kafka-go/crc32.go +++ b/vendor/github.com/segmentio/kafka-go/crc32.go @@ -1,80 +1,55 @@ package kafka import ( - "bytes" "encoding/binary" "hash/crc32" - "sync" ) -func crc32OfMessage(magicByte int8, attributes int8, timestamp int64, key []byte, value []byte) uint32 { - b := acquireCrc32Buffer() - b.writeInt8(magicByte) - b.writeInt8(attributes) - if magicByte != 0 { - b.writeInt64(timestamp) - } - b.writeBytes(key) - b.writeBytes(value) - sum := b.sum - releaseCrc32Buffer(b) - return sum -} - -type crc32Buffer struct { - sum uint32 - buf bytes.Buffer +type crc32Writer struct { + table *crc32.Table + buffer [8]byte + crc32 uint32 } -func (c *crc32Buffer) writeInt8(i int8) { - c.buf.Truncate(0) - c.buf.WriteByte(byte(i)) - c.update() +func (w *crc32Writer) update(b []byte) { + w.crc32 = crc32.Update(w.crc32, w.table, b) } -func (c *crc32Buffer) writeInt32(i int32) { - a := [4]byte{} - binary.BigEndian.PutUint32(a[:], uint32(i)) - c.buf.Truncate(0) - c.buf.Write(a[:]) - c.update() +func (w *crc32Writer) writeInt8(i int8) { + w.buffer[0] = byte(i) + w.update(w.buffer[:1]) } -func (c *crc32Buffer) writeInt64(i int64) { - a := [8]byte{} - binary.BigEndian.PutUint64(a[:], uint64(i)) - c.buf.Truncate(0) - c.buf.Write(a[:]) - c.update() +func (w *crc32Writer) writeInt16(i int16) { + binary.BigEndian.PutUint16(w.buffer[:2], uint16(i)) + w.update(w.buffer[:2]) } -func (c *crc32Buffer) writeBytes(b []byte) { - if b == nil { - c.writeInt32(-1) - } else { - c.writeInt32(int32(len(b))) - } - c.sum = crc32Update(c.sum, b) +func (w *crc32Writer) writeInt32(i int32) { + binary.BigEndian.PutUint32(w.buffer[:4], uint32(i)) + w.update(w.buffer[:4]) } -func (c *crc32Buffer) update() { - c.sum = crc32Update(c.sum, c.buf.Bytes()) +func (w *crc32Writer) writeInt64(i int64) { + binary.BigEndian.PutUint64(w.buffer[:8], uint64(i)) + w.update(w.buffer[:8]) } -func crc32Update(sum uint32, b []byte) uint32 { - return crc32.Update(sum, crc32.IEEETable, b) -} - -var crc32BufferPool = sync.Pool{ - New: func() interface{} { return &crc32Buffer{} }, +func (w *crc32Writer) writeBytes(b []byte) { + n := len(b) + if b == nil { + n = -1 + } + w.writeInt32(int32(n)) + w.update(b) } -func acquireCrc32Buffer() *crc32Buffer { - c := crc32BufferPool.Get().(*crc32Buffer) - c.sum = 0 - return c +func (w *crc32Writer) Write(b []byte) (int, error) { + w.update(b) + return len(b), nil } -func releaseCrc32Buffer(b *crc32Buffer) { - crc32BufferPool.Put(b) +func (w *crc32Writer) WriteString(s string) (int, error) { + w.update([]byte(s)) + return len(s), nil } diff --git a/vendor/github.com/segmentio/kafka-go/createtopics.go b/vendor/github.com/segmentio/kafka-go/createtopics.go index 
a3cccb03d..a38e0801d 100644 --- a/vendor/github.com/segmentio/kafka-go/createtopics.go +++ b/vendor/github.com/segmentio/kafka-go/createtopics.go @@ -10,26 +10,26 @@ type ConfigEntry struct { ConfigValue string } -func (c ConfigEntry) toCreateTopicsRequestV2ConfigEntry() createTopicsRequestV2ConfigEntry { - return createTopicsRequestV2ConfigEntry{ +func (c ConfigEntry) toCreateTopicsRequestV0ConfigEntry() createTopicsRequestV0ConfigEntry { + return createTopicsRequestV0ConfigEntry{ ConfigName: c.ConfigName, ConfigValue: c.ConfigValue, } } -type createTopicsRequestV2ConfigEntry struct { +type createTopicsRequestV0ConfigEntry struct { ConfigName string ConfigValue string } -func (t createTopicsRequestV2ConfigEntry) size() int32 { +func (t createTopicsRequestV0ConfigEntry) size() int32 { return sizeofString(t.ConfigName) + sizeofString(t.ConfigValue) } -func (t createTopicsRequestV2ConfigEntry) writeTo(w *bufio.Writer) { - writeString(w, t.ConfigName) - writeString(w, t.ConfigValue) +func (t createTopicsRequestV0ConfigEntry) writeTo(wb *writeBuffer) { + wb.writeString(t.ConfigName) + wb.writeString(t.ConfigValue) } type ReplicaAssignment struct { @@ -37,26 +37,26 @@ type ReplicaAssignment struct { Replicas int } -func (a ReplicaAssignment) toCreateTopicsRequestV2ReplicaAssignment() createTopicsRequestV2ReplicaAssignment { - return createTopicsRequestV2ReplicaAssignment{ +func (a ReplicaAssignment) toCreateTopicsRequestV0ReplicaAssignment() createTopicsRequestV0ReplicaAssignment { + return createTopicsRequestV0ReplicaAssignment{ Partition: int32(a.Partition), Replicas: int32(a.Replicas), } } -type createTopicsRequestV2ReplicaAssignment struct { +type createTopicsRequestV0ReplicaAssignment struct { Partition int32 Replicas int32 } -func (t createTopicsRequestV2ReplicaAssignment) size() int32 { +func (t createTopicsRequestV0ReplicaAssignment) size() int32 { return sizeofInt32(t.Partition) + sizeofInt32(t.Replicas) } -func (t createTopicsRequestV2ReplicaAssignment) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt32(w, t.Replicas) +func (t createTopicsRequestV0ReplicaAssignment) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt32(t.Replicas) } type TopicConfig struct { @@ -77,30 +77,30 @@ type TopicConfig struct { ConfigEntries []ConfigEntry } -func (t TopicConfig) toCreateTopicsRequestV2Topic() createTopicsRequestV2Topic { - var requestV2ReplicaAssignments []createTopicsRequestV2ReplicaAssignment +func (t TopicConfig) toCreateTopicsRequestV0Topic() createTopicsRequestV0Topic { + var requestV0ReplicaAssignments []createTopicsRequestV0ReplicaAssignment for _, a := range t.ReplicaAssignments { - requestV2ReplicaAssignments = append( - requestV2ReplicaAssignments, - a.toCreateTopicsRequestV2ReplicaAssignment()) + requestV0ReplicaAssignments = append( + requestV0ReplicaAssignments, + a.toCreateTopicsRequestV0ReplicaAssignment()) } - var requestV2ConfigEntries []createTopicsRequestV2ConfigEntry + var requestV0ConfigEntries []createTopicsRequestV0ConfigEntry for _, c := range t.ConfigEntries { - requestV2ConfigEntries = append( - requestV2ConfigEntries, - c.toCreateTopicsRequestV2ConfigEntry()) + requestV0ConfigEntries = append( + requestV0ConfigEntries, + c.toCreateTopicsRequestV0ConfigEntry()) } - return createTopicsRequestV2Topic{ + return createTopicsRequestV0Topic{ Topic: t.Topic, NumPartitions: int32(t.NumPartitions), ReplicationFactor: int16(t.ReplicationFactor), - ReplicaAssignments: requestV2ReplicaAssignments, - ConfigEntries: requestV2ConfigEntries, + 
ReplicaAssignments: requestV0ReplicaAssignments, + ConfigEntries: requestV0ConfigEntries, } } -type createTopicsRequestV2Topic struct { +type createTopicsRequestV0Topic struct { // Topic name Topic string @@ -112,13 +112,13 @@ type createTopicsRequestV2Topic struct { // ReplicaAssignments among kafka brokers for this topic partitions. If this // is set num_partitions and replication_factor must be unset. - ReplicaAssignments []createTopicsRequestV2ReplicaAssignment + ReplicaAssignments []createTopicsRequestV0ReplicaAssignment // ConfigEntries holds topic level configuration for topic to be set. - ConfigEntries []createTopicsRequestV2ConfigEntry + ConfigEntries []createTopicsRequestV0ConfigEntry } -func (t createTopicsRequestV2Topic) size() int32 { +func (t createTopicsRequestV0Topic) size() int32 { return sizeofString(t.Topic) + sizeofInt32(t.NumPartitions) + sizeofInt16(t.ReplicationFactor) + @@ -126,115 +126,94 @@ func (t createTopicsRequestV2Topic) size() int32 { sizeofArray(len(t.ConfigEntries), func(i int) int32 { return t.ConfigEntries[i].size() }) } -func (t createTopicsRequestV2Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt32(w, t.NumPartitions) - writeInt16(w, t.ReplicationFactor) - writeArray(w, len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(w) }) - writeArray(w, len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(w) }) +func (t createTopicsRequestV0Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt32(t.NumPartitions) + wb.writeInt16(t.ReplicationFactor) + wb.writeArray(len(t.ReplicaAssignments), func(i int) { t.ReplicaAssignments[i].writeTo(wb) }) + wb.writeArray(len(t.ConfigEntries), func(i int) { t.ConfigEntries[i].writeTo(wb) }) } // See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics -type createTopicsRequestV2 struct { +type createTopicsRequestV0 struct { // Topics contains n array of single topic creation requests. Can not // have multiple entries for the same topic. - Topics []createTopicsRequestV2Topic + Topics []createTopicsRequestV0Topic // Timeout ms to wait for a topic to be completely created on the // controller node. Values <= 0 will trigger topic creation and return immediately Timeout int32 - - // ValidateOnly if true, the request will be validated, but the topic won - // 't be created. 
- ValidateOnly bool } -func (t createTopicsRequestV2) size() int32 { +func (t createTopicsRequestV0) size() int32 { return sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) + - sizeofInt32(t.Timeout) + - sizeofBool(t.ValidateOnly) + sizeofInt32(t.Timeout) } -func (t createTopicsRequestV2) writeTo(w *bufio.Writer) { - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) - writeInt32(w, t.Timeout) - writeBool(w, t.ValidateOnly) +func (t createTopicsRequestV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) + wb.writeInt32(t.Timeout) } -type createTopicsResponseV2TopicError struct { +type createTopicsResponseV0TopicError struct { // Topic name Topic string // ErrorCode holds response error code ErrorCode int16 - - // ErrorMessage holds the response error message - ErrorMessage string } -func (t createTopicsResponseV2TopicError) size() int32 { +func (t createTopicsResponseV0TopicError) size() int32 { return sizeofString(t.Topic) + - sizeofInt16(t.ErrorCode) + - sizeofString(t.ErrorMessage) + sizeofInt16(t.ErrorCode) } -func (t createTopicsResponseV2TopicError) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt16(w, t.ErrorCode) - writeString(w, t.ErrorMessage) +func (t createTopicsResponseV0TopicError) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt16(t.ErrorCode) } -func (t *createTopicsResponseV2TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *createTopicsResponseV0TopicError) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.Topic); err != nil { return } if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { return } - if remain, err = readString(r, remain, &t.ErrorMessage); err != nil { - return - } return } // See http://kafka.apache.org/protocol.html#The_Messages_CreateTopics -type createTopicsResponseV2 struct { - ThrottleTimeMS int32 - TopicErrors []createTopicsResponseV2TopicError +type createTopicsResponseV0 struct { + TopicErrors []createTopicsResponseV0TopicError } -func (t createTopicsResponseV2) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() }) +func (t createTopicsResponseV0) size() int32 { + return sizeofArray(len(t.TopicErrors), func(i int) int32 { return t.TopicErrors[i].size() }) } -func (t createTopicsResponseV2) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(w) }) +func (t createTopicsResponseV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.TopicErrors), func(i int) { t.TopicErrors[i].writeTo(wb) }) } -func (t *createTopicsResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - +func (t *createTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var topic createTopicsResponseV2TopicError + var topic createTopicsResponseV0TopicError if fnRemain, fnErr = (&topic).readFrom(r, size); err != nil { return } t.TopicErrors = append(t.TopicErrors, topic) return } - if remain, err = readArrayWith(r, remain, fn); err != nil { + if remain, err = readArrayWith(r, size, fn); err != nil { return } return } -func (c *Conn) createTopics(request createTopicsRequestV2) 
(createTopicsResponseV2, error) { - var response createTopicsResponseV2 +func (c *Conn) createTopics(request createTopicsRequestV0) (createTopicsResponseV0, error) { + var response createTopicsResponseV0 err := c.writeOperation( func(deadline time.Time, id int32) error { @@ -243,7 +222,7 @@ func (c *Conn) createTopics(request createTopicsRequestV2) (createTopicsResponse deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) } - return c.writeRequest(createTopicsRequest, v2, id, request) + return c.writeRequest(createTopicsRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -267,15 +246,15 @@ func (c *Conn) createTopics(request createTopicsRequestV2) (createTopicsResponse // operational semantics. In other words, if CreateTopics is invoked with a // configuration for an existing topic, it will have no effect. func (c *Conn) CreateTopics(topics ...TopicConfig) error { - var requestV2Topics []createTopicsRequestV2Topic + var requestV0Topics []createTopicsRequestV0Topic for _, t := range topics { - requestV2Topics = append( - requestV2Topics, - t.toCreateTopicsRequestV2Topic()) + requestV0Topics = append( + requestV0Topics, + t.toCreateTopicsRequestV0Topic()) } - _, err := c.createTopics(createTopicsRequestV2{ - Topics: requestV2Topics, + _, err := c.createTopics(createTopicsRequestV0{ + Topics: requestV0Topics, }) switch err { diff --git a/vendor/github.com/segmentio/kafka-go/deletetopics.go b/vendor/github.com/segmentio/kafka-go/deletetopics.go index 983bff7a0..c0af87db1 100644 --- a/vendor/github.com/segmentio/kafka-go/deletetopics.go +++ b/vendor/github.com/segmentio/kafka-go/deletetopics.go @@ -6,7 +6,7 @@ import ( ) // See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics -type deleteTopicsRequestV1 struct { +type deleteTopicsRequestV0 struct { // Topics holds the topic names Topics []string @@ -16,55 +16,45 @@ type deleteTopicsRequestV1 struct { Timeout int32 } -func (t deleteTopicsRequestV1) size() int32 { +func (t deleteTopicsRequestV0) size() int32 { return sizeofStringArray(t.Topics) + sizeofInt32(t.Timeout) } -func (t deleteTopicsRequestV1) writeTo(w *bufio.Writer) { - writeStringArray(w, t.Topics) - writeInt32(w, t.Timeout) +func (t deleteTopicsRequestV0) writeTo(wb *writeBuffer) { + wb.writeStringArray(t.Topics) + wb.writeInt32(t.Timeout) } -type deleteTopicsResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type deleteTopicsResponseV0 struct { // TopicErrorCodes holds per topic error codes - TopicErrorCodes []deleteTopicsResponseV1TopicErrorCode + TopicErrorCodes []deleteTopicsResponseV0TopicErrorCode } -func (t deleteTopicsResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() }) +func (t deleteTopicsResponseV0) size() int32 { + return sizeofArray(len(t.TopicErrorCodes), func(i int) int32 { return t.TopicErrorCodes[i].size() }) } -func (t *deleteTopicsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } +func (t *deleteTopicsResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { fn := func(withReader *bufio.Reader, withSize int) 
(fnRemain int, fnErr error) { - var item deleteTopicsResponseV1TopicErrorCode + var item deleteTopicsResponseV0TopicErrorCode if fnRemain, fnErr = (&item).readFrom(withReader, withSize); err != nil { return } t.TopicErrorCodes = append(t.TopicErrorCodes, item) return } - if remain, err = readArrayWith(r, remain, fn); err != nil { + if remain, err = readArrayWith(r, size, fn); err != nil { return } return } -func (t deleteTopicsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(w) }) +func (t deleteTopicsResponseV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.TopicErrorCodes), func(i int) { t.TopicErrorCodes[i].writeTo(wb) }) } -type deleteTopicsResponseV1TopicErrorCode struct { +type deleteTopicsResponseV0TopicErrorCode struct { // Topic holds the topic name Topic string @@ -72,12 +62,12 @@ type deleteTopicsResponseV1TopicErrorCode struct { ErrorCode int16 } -func (t deleteTopicsResponseV1TopicErrorCode) size() int32 { +func (t deleteTopicsResponseV0TopicErrorCode) size() int32 { return sizeofString(t.Topic) + sizeofInt16(t.ErrorCode) } -func (t *deleteTopicsResponseV1TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *deleteTopicsResponseV0TopicErrorCode) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.Topic); err != nil { return } @@ -87,16 +77,16 @@ func (t *deleteTopicsResponseV1TopicErrorCode) readFrom(r *bufio.Reader, size in return } -func (t deleteTopicsResponseV1TopicErrorCode) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt16(w, t.ErrorCode) +func (t deleteTopicsResponseV0TopicErrorCode) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt16(t.ErrorCode) } // deleteTopics deletes the specified topics. 
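As a usage sketch of the exported wrappers around these v0 requests: the snippet below dials a broker, creates a topic, and deletes it again. It assumes the Conn.CreateTopics helper shown earlier in this diff and an exported Conn.DeleteTopics wrapper around the internal deleteTopics call below; the broker address, topic name, and sizing are placeholders.

package main

import (
	kafka "github.com/segmentio/kafka-go"
)

func main() {
	// Dial any broker; both calls are issued over this single connection using
	// the v0 request/response shapes defined in this diff.
	conn, err := kafka.Dial("tcp", "localhost:9092") // placeholder address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	if err := conn.CreateTopics(kafka.TopicConfig{
		Topic:             "example-topic", // placeholder topic
		NumPartitions:     3,
		ReplicationFactor: 1,
	}); err != nil {
		panic(err)
	}

	if err := conn.DeleteTopics("example-topic"); err != nil {
		panic(err)
	}
}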
// // See http://kafka.apache.org/protocol.html#The_Messages_DeleteTopics -func (c *Conn) deleteTopics(request deleteTopicsRequestV1) (deleteTopicsResponseV1, error) { - var response deleteTopicsResponseV1 +func (c *Conn) deleteTopics(request deleteTopicsRequestV0) (deleteTopicsResponseV0, error) { + var response deleteTopicsResponseV0 err := c.writeOperation( func(deadline time.Time, id int32) error { if request.Timeout == 0 { @@ -104,7 +94,7 @@ func (c *Conn) deleteTopics(request deleteTopicsRequestV1) (deleteTopicsResponse deadline = adjustDeadlineForRTT(deadline, now, defaultRTT) request.Timeout = milliseconds(deadlineToTimeout(deadline, now)) } - return c.writeRequest(deleteTopicsRequest, v1, id, request) + return c.writeRequest(deleteTopicsRequest, v0, id, request) }, func(deadline time.Time, size int) error { return expectZeroSize(func() (remain int, err error) { @@ -113,7 +103,7 @@ func (c *Conn) deleteTopics(request deleteTopicsRequestV1) (deleteTopicsResponse }, ) if err != nil { - return deleteTopicsResponseV1{}, err + return deleteTopicsResponseV0{}, err } for _, c := range response.TopicErrorCodes { if c.ErrorCode != 0 { diff --git a/vendor/github.com/segmentio/kafka-go/describegroups.go b/vendor/github.com/segmentio/kafka-go/describegroups.go index a7b7982ac..de9955a30 100644 --- a/vendor/github.com/segmentio/kafka-go/describegroups.go +++ b/vendor/github.com/segmentio/kafka-go/describegroups.go @@ -3,21 +3,21 @@ package kafka import "bufio" // See http://kafka.apache.org/protocol.html#The_Messages_DescribeGroups -type describeGroupsRequestV1 struct { +type describeGroupsRequestV0 struct { // List of groupIds to request metadata for (an empty groupId array // will return empty group metadata). GroupIDs []string } -func (t describeGroupsRequestV1) size() int32 { +func (t describeGroupsRequestV0) size() int32 { return sizeofStringArray(t.GroupIDs) } -func (t describeGroupsRequestV1) writeTo(w *bufio.Writer) { - writeStringArray(w, t.GroupIDs) +func (t describeGroupsRequestV0) writeTo(wb *writeBuffer) { + wb.writeStringArray(t.GroupIDs) } -type describeGroupsResponseMemberV1 struct { +type describeGroupsResponseMemberV0 struct { // MemberID assigned by the group coordinator MemberID string @@ -39,7 +39,7 @@ type describeGroupsResponseMemberV1 struct { MemberAssignments []byte } -func (t describeGroupsResponseMemberV1) size() int32 { +func (t describeGroupsResponseMemberV0) size() int32 { return sizeofString(t.MemberID) + sizeofString(t.ClientID) + sizeofString(t.ClientHost) + @@ -47,15 +47,15 @@ func (t describeGroupsResponseMemberV1) size() int32 { sizeofBytes(t.MemberAssignments) } -func (t describeGroupsResponseMemberV1) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeString(w, t.ClientID) - writeString(w, t.ClientHost) - writeBytes(w, t.MemberMetadata) - writeBytes(w, t.MemberAssignments) +func (t describeGroupsResponseMemberV0) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeString(t.ClientID) + wb.writeString(t.ClientHost) + wb.writeBytes(t.MemberMetadata) + wb.writeBytes(t.MemberAssignments) } -func (t *describeGroupsResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *describeGroupsResponseMemberV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.MemberID); err != nil { return } @@ -74,7 +74,7 @@ func (t *describeGroupsResponseMemberV1) readFrom(r *bufio.Reader, size int) (re return } -type describeGroupsResponseGroupV1 struct { +type 
describeGroupsResponseGroupV0 struct { // ErrorCode holds response error code ErrorCode int16 @@ -93,10 +93,10 @@ type describeGroupsResponseGroupV1 struct { Protocol string // Members contains the current group members (only provided if the group is not Dead) - Members []describeGroupsResponseMemberV1 + Members []describeGroupsResponseMemberV0 } -func (t describeGroupsResponseGroupV1) size() int32 { +func (t describeGroupsResponseGroupV0) size() int32 { return sizeofInt16(t.ErrorCode) + sizeofString(t.GroupID) + sizeofString(t.State) + @@ -105,16 +105,16 @@ func (t describeGroupsResponseGroupV1) size() int32 { sizeofArray(len(t.Members), func(i int) int32 { return t.Members[i].size() }) } -func (t describeGroupsResponseGroupV1) writeTo(w *bufio.Writer) { - writeInt16(w, t.ErrorCode) - writeString(w, t.GroupID) - writeString(w, t.State) - writeString(w, t.ProtocolType) - writeString(w, t.Protocol) - writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) }) +func (t describeGroupsResponseGroupV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeString(t.GroupID) + wb.writeString(t.State) + wb.writeString(t.ProtocolType) + wb.writeString(t.Protocol) + wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) }) } -func (t *describeGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *describeGroupsResponseGroupV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { return } @@ -132,7 +132,7 @@ func (t *describeGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (rem } fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := describeGroupsResponseMemberV1{} + item := describeGroupsResponseMemberV0{} if fnRemain, fnErr = (&item).readFrom(r, size); err != nil { return } @@ -146,39 +146,29 @@ func (t *describeGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (rem return } -type describeGroupsResponseV1 struct { - // Duration in milliseconds for which the request was throttled due - // to quota violation (Zero if the request did not violate any quota) - ThrottleTimeMS int32 - +type describeGroupsResponseV0 struct { // Groups holds selected group information - Groups []describeGroupsResponseGroupV1 + Groups []describeGroupsResponseGroupV0 } -func (t describeGroupsResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) +func (t describeGroupsResponseV0) size() int32 { + return sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) } -func (t describeGroupsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) }) +func (t describeGroupsResponseV0) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) }) } -func (t *describeGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - +func (t *describeGroupsResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := describeGroupsResponseGroupV1{} + item := describeGroupsResponseGroupV0{} if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { return } t.Groups = append(t.Groups, item) return } - if remain, err = 
readArrayWith(r, remain, fn); err != nil { + if remain, err = readArrayWith(r, sz, fn); err != nil { return } diff --git a/vendor/github.com/segmentio/kafka-go/dialer.go b/vendor/github.com/segmentio/kafka-go/dialer.go index 57da750de..f019a421b 100644 --- a/vendor/github.com/segmentio/kafka-go/dialer.go +++ b/vendor/github.com/segmentio/kafka-go/dialer.go @@ -3,9 +3,13 @@ package kafka import ( "context" "crypto/tls" + "io" "net" "strconv" + "strings" "time" + + "github.com/segmentio/kafka-go/sasl" ) // The Dialer type mirrors the net.Dialer API but is designed to open kafka @@ -60,6 +64,16 @@ type Dialer struct { // TLS enables Dialer to open secure connections. If nil, standard net.Conn // will be used. TLS *tls.Config + + // SASLMechanism configures the Dialer to use SASL authentication. If nil, + // no authentication will be performed. + SASLMechanism sasl.Mechanism + + // The transactional id to use for transactional delivery. Idempotent + // deliver should be enabled if transactional id is configured. + // For more details look at transactional.id description here: http://kafka.apache.org/documentation.html#producerconfigs + // Empty string means that the connection will be non-transactional. + TransactionalID string } // Dial connects to the address on the named network. @@ -93,11 +107,15 @@ func (d *Dialer) DialContext(ctx context.Context, network string, address string defer cancel() } - c, err := d.dialContext(ctx, network, address) - if err != nil { - return nil, err - } - return NewConnWith(c, ConnConfig{ClientID: d.ClientID}), nil + return d.connect( + ctx, + network, + address, + ConnConfig{ + ClientID: d.ClientID, + TransactionalID: d.TransactionalID, + }, + ) } // DialLeader opens a connection to the leader of the partition for a given @@ -109,39 +127,50 @@ func (d *Dialer) DialContext(ctx context.Context, network string, address string // The original address is only used as a mechanism to discover the // configuration of the kafka cluster that we're connecting to. func (d *Dialer) DialLeader(ctx context.Context, network string, address string, topic string, partition int) (*Conn, error) { - b, err := d.LookupLeader(ctx, network, address, topic, partition) - if err != nil { - return nil, err - } - - c, err := d.dialContext(ctx, network, net.JoinHostPort(b.Host, strconv.Itoa(b.Port))) + p, err := d.LookupPartition(ctx, network, address, topic, partition) if err != nil { return nil, err } + return d.DialPartition(ctx, network, address, p) +} - return NewConnWith(c, ConnConfig{ - ClientID: d.ClientID, - Topic: topic, - Partition: partition, - }), nil +// DialPartition opens a connection to the leader of the partition specified by partition +// descriptor. It's strongly advised to use descriptor of the partition that comes out of +// functions LookupPartition or LookupPartitions. +func (d *Dialer) DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return d.connect(ctx, network, net.JoinHostPort(partition.Leader.Host, strconv.Itoa(partition.Leader.Port)), ConnConfig{ + ClientID: d.ClientID, + Topic: partition.Topic, + Partition: partition.ID, + TransactionalID: d.TransactionalID, + }) } // LookupLeader searches for the kafka broker that is the leader of the // partition for a given topic, returning a Broker value representing it. 
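The refactor above splits leader lookup from dialing, so callers can resolve a Partition descriptor once and reuse it. A hedged sketch of that flow follows, using the package-level LookupPartition and DialPartition helpers added further down in this file; the address, topic, and partition number are placeholders.

package main

import (
	"context"
	"fmt"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	ctx := context.Background()

	// Resolve the descriptor for partition 0 of the topic, then dial its leader.
	p, err := kafka.LookupPartition(ctx, "tcp", "localhost:9092", "example-topic", 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("partition %d of %s is led by %s:%d\n", p.ID, p.Topic, p.Leader.Host, p.Leader.Port)

	// The returned connection is scoped to that topic and partition.
	conn, err := kafka.DialPartition(ctx, "tcp", "localhost:9092", p)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}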
func (d *Dialer) LookupLeader(ctx context.Context, network string, address string, topic string, partition int) (Broker, error) { + p, err := d.LookupPartition(ctx, network, address, topic, partition) + return p.Leader, err +} + +// LookupPartition searches for the description of specified partition id. +func (d *Dialer) LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { c, err := d.DialContext(ctx, network, address) if err != nil { - return Broker{}, err + return Partition{}, err } defer c.Close() - brkch := make(chan Broker, 1) + brkch := make(chan Partition, 1) errch := make(chan error, 1) go func() { for attempt := 0; true; attempt++ { if attempt != 0 { - sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) + if !sleep(ctx, backoff(attempt, 100*time.Millisecond, 10*time.Second)) { + errch <- ctx.Err() + return + } } partitions, err := c.ReadPartitions(topic) @@ -155,7 +184,7 @@ func (d *Dialer) LookupLeader(ctx context.Context, network string, address strin for _, p := range partitions { if p.ID == partition { - brkch <- p.Leader + brkch <- p return } } @@ -164,14 +193,14 @@ func (d *Dialer) LookupLeader(ctx context.Context, network string, address strin errch <- UnknownTopicOrPartition }() - var brk Broker + var prt Partition select { - case brk = <-brkch: + case prt = <-brkch: case err = <-errch: case <-ctx.Done(): err = ctx.Err() } - return brk, err + return prt, err } // LookupPartitions returns the list of partitions that exist for the given topic. @@ -204,8 +233,8 @@ func (d *Dialer) LookupPartitions(ctx context.Context, network string, address s } // connectTLS returns a tls.Conn that has already completed the Handshake -func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn) (tlsConn *tls.Conn, err error) { - tlsConn = tls.Client(conn, d.TLS) +func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn, config *tls.Config) (tlsConn *tls.Conn, err error) { + tlsConn = tls.Client(conn, config) errch := make(chan error) go func() { @@ -226,6 +255,65 @@ func (d *Dialer) connectTLS(ctx context.Context, conn net.Conn) (tlsConn *tls.Co return } +// connect opens a socket connection to the broker, wraps it to create a +// kafka connection, and performs SASL authentication if configured to do so. +func (d *Dialer) connect(ctx context.Context, network, address string, connCfg ConnConfig) (*Conn, error) { + + c, err := d.dialContext(ctx, network, address) + if err != nil { + return nil, err + } + + conn := NewConnWith(c, connCfg) + + if d.SASLMechanism != nil { + if err := d.authenticateSASL(ctx, conn); err != nil { + _ = conn.Close() + return nil, err + } + } + + return conn, nil +} + +// authenticateSASL performs all of the required requests to authenticate this +// connection. If any step fails, this function returns with an error. A nil +// error indicates successful authentication. +// +// In case of error, this function *does not* close the connection. That is the +// responsibility of the caller. +func (d *Dialer) authenticateSASL(ctx context.Context, conn *Conn) error { + if err := conn.saslHandshake(d.SASLMechanism.Name()); err != nil { + return err + } + + sess, state, err := d.SASLMechanism.Start(ctx) + if err != nil { + return err + } + + for completed := false; !completed; { + challenge, err := conn.saslAuthenticate(state) + switch err { + case nil: + case io.EOF: + // the broker may communicate a failed exchange by closing the + // connection (esp. 
in the case where we're passing opaque sasl + // data over the wire since there's no protocol info). + return SASLAuthenticationFailed + default: + return err + } + + completed, state, err = sess.Next(ctx, challenge) + if err != nil { + return err + } + } + + return nil +} + func (d *Dialer) dialContext(ctx context.Context, network string, address string) (net.Conn, error) { if r := d.Resolver; r != nil { host, port := splitHostPort(address) @@ -253,7 +341,20 @@ func (d *Dialer) dialContext(ctx context.Context, network string, address string } if d.TLS != nil { - return d.connectTLS(ctx, conn) + c := d.TLS + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if c.ServerName == "" { + c = d.TLS.Clone() + // Copied from tls.go in the standard library. + colonPos := strings.LastIndex(address, ":") + if colonPos == -1 { + colonPos = len(address) + } + hostname := address[:colonPos] + c.ServerName = hostname + } + return d.connectTLS(ctx, conn, c) } return conn, nil @@ -280,6 +381,21 @@ func DialLeader(ctx context.Context, network string, address string, topic strin return DefaultDialer.DialLeader(ctx, network, address, topic, partition) } +// DialPartition is a convenience wrapper for DefaultDialer.DialPartition. +func DialPartition(ctx context.Context, network string, address string, partition Partition) (*Conn, error) { + return DefaultDialer.DialPartition(ctx, network, address, partition) +} + +// LookupPartition is a convenience wrapper for DefaultDialer.LookupPartition. +func LookupPartition(ctx context.Context, network string, address string, topic string, partition int) (Partition, error) { + return DefaultDialer.LookupPartition(ctx, network, address, topic, partition) +} + +// LookupPartitions is a convenience wrapper for DefaultDialer.LookupPartitions. +func LookupPartitions(ctx context.Context, network string, address string, topic string) ([]Partition, error) { + return DefaultDialer.LookupPartitions(ctx, network, address, topic) +} + // The Resolver interface is used as an abstraction to provide service discovery // of the hosts of a kafka cluster. 
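Tying the new Dialer fields together, the sketch below opens a TLS connection that also performs the SASL handshake implemented by authenticateSASL above. It assumes the plain mechanism sub-package (github.com/segmentio/kafka-go/sasl/plain) shipped alongside the sasl package imported in this diff; the credentials and broker address are placeholders.

package main

import (
	"context"
	"crypto/tls"
	"time"

	kafka "github.com/segmentio/kafka-go"
	"github.com/segmentio/kafka-go/sasl/plain"
)

func main() {
	dialer := &kafka.Dialer{
		Timeout:   10 * time.Second,
		DualStack: true,
		// With an empty ServerName, dialContext above infers it from the address.
		TLS: &tls.Config{},
		// SASL credentials are placeholders; plain is one Mechanism implementation.
		SASLMechanism: plain.Mechanism{
			Username: "user",
			Password: "secret",
		},
	}

	conn, err := dialer.DialContext(context.Background(), "tcp", "broker.example.com:9093")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}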
type Resolver interface { diff --git a/vendor/github.com/segmentio/kafka-go/discard.go b/vendor/github.com/segmentio/kafka-go/discard.go index bbb56b620..c92d53035 100644 --- a/vendor/github.com/segmentio/kafka-go/discard.go +++ b/vendor/github.com/segmentio/kafka-go/discard.go @@ -3,7 +3,15 @@ package kafka import "bufio" func discardN(r *bufio.Reader, sz int, n int) (int, error) { - n, err := r.Discard(n) + var err error + if n <= sz { + n, err = r.Discard(n) + } else { + n, err = r.Discard(sz) + if err == nil { + err = errShortRead + } + } return sz - n, err } diff --git a/vendor/github.com/segmentio/kafka-go/error.go b/vendor/github.com/segmentio/kafka-go/error.go index 0559af628..dc91e6e2c 100644 --- a/vendor/github.com/segmentio/kafka-go/error.go +++ b/vendor/github.com/segmentio/kafka-go/error.go @@ -64,6 +64,27 @@ const ( TransactionalIDAuthorizationFailed Error = 53 SecurityDisabled Error = 54 BrokerAuthorizationFailed Error = 55 + KafkaStorageError Error = 56 + LogDirNotFound Error = 57 + SASLAuthenticationFailed Error = 58 + UnknownProducerId Error = 59 + ReassignmentInProgress Error = 60 + DelegationTokenAuthDisabled Error = 61 + DelegationTokenNotFound Error = 62 + DelegationTokenOwnerMismatch Error = 63 + DelegationTokenRequestNotAllowed Error = 64 + DelegationTokenAuthorizationFailed Error = 65 + DelegationTokenExpired Error = 66 + InvalidPrincipalType Error = 67 + NonEmptyGroup Error = 68 + GroupIdNotFound Error = 69 + FetchSessionIDNotFound Error = 70 + InvalidFetchSessionEpoch Error = 71 + ListenerNotFound Error = 72 + TopicDeletionDisabled Error = 73 + FencedLeaderEpoch Error = 74 + UnknownLeaderEpoch Error = 75 + UnsupportedCompressionType Error = 76 ) // Error satisfies the error interface. @@ -201,6 +222,48 @@ func (e Error) Title() string { return "Security Disabled" case BrokerAuthorizationFailed: return "Broker Authorization Failed" + case KafkaStorageError: + return "Kafka Storage Error" + case LogDirNotFound: + return "Log Dir Not Found" + case SASLAuthenticationFailed: + return "SASL Authentication Failed" + case UnknownProducerId: + return "Unknown Producer ID" + case ReassignmentInProgress: + return "Reassignment In Progress" + case DelegationTokenAuthDisabled: + return "Delegation Token Auth Disabled" + case DelegationTokenNotFound: + return "Delegation Token Not Found" + case DelegationTokenOwnerMismatch: + return "Delegation Token Owner Mismatch" + case DelegationTokenRequestNotAllowed: + return "Delegation Token Request Not Allowed" + case DelegationTokenAuthorizationFailed: + return "Delegation Token Authorization Failed" + case DelegationTokenExpired: + return "Delegation Token Expired" + case InvalidPrincipalType: + return "Invalid Principal Type" + case NonEmptyGroup: + return "Non Empty Group" + case GroupIdNotFound: + return "Group ID Not Found" + case FetchSessionIDNotFound: + return "Fetch Session ID Not Found" + case InvalidFetchSessionEpoch: + return "Invalid Fetch Session Epoch" + case ListenerNotFound: + return "Listener Not Found" + case TopicDeletionDisabled: + return "Topic Deletion Disabled" + case FencedLeaderEpoch: + return "Fenced Leader Epoch" + case UnknownLeaderEpoch: + return "Unknown Leader Epoch" + case UnsupportedCompressionType: + return "Unsupported Compression Type" } return "" } @@ -318,6 +381,48 @@ func (e Error) Description() string { return "the security features are disabled" case BrokerAuthorizationFailed: return "the broker authorization failed" + case KafkaStorageError: + return "disk error when trying to access log 
file on the disk" + case LogDirNotFound: + return "the user-specified log directory is not found in the broker config" + case SASLAuthenticationFailed: + return "SASL Authentication failed" + case UnknownProducerId: + return "the broker could not locate the producer metadata associated with the producer ID" + case ReassignmentInProgress: + return "a partition reassignment is in progress" + case DelegationTokenAuthDisabled: + return "delegation token feature is not enabled" + case DelegationTokenNotFound: + return "delegation token is not found on server" + case DelegationTokenOwnerMismatch: + return "specified principal is not valid owner/renewer" + case DelegationTokenRequestNotAllowed: + return "delegation token requests are not allowed on plaintext/1-way ssl channels and on delegation token authenticated channels" + case DelegationTokenAuthorizationFailed: + return "delegation token authorization failed" + case DelegationTokenExpired: + return "delegation token is expired" + case InvalidPrincipalType: + return "supplied principaltype is not supported" + case NonEmptyGroup: + return "the group is not empty" + case GroupIdNotFound: + return "the group ID does not exist" + case FetchSessionIDNotFound: + return "the fetch session ID was not found" + case InvalidFetchSessionEpoch: + return "the fetch session epoch is invalid" + case ListenerNotFound: + return "there is no listener on the leader broker that matches the listener on which metadata request was processed" + case TopicDeletionDisabled: + return "topic deletion is disabled" + case FencedLeaderEpoch: + return "the leader epoch in the request is older than the epoch on the broker" + case UnknownLeaderEpoch: + return "the leader epoch in the request is newer than the epoch on the broker" + case UnsupportedCompressionType: + return "the requesting client does not support the compression type of given partition" } return "" } @@ -358,3 +463,12 @@ func coalesceErrors(errs ...error) error { } return nil } + +type MessageTooLargeError struct { + Message Message + Remaining []Message +} + +func (e MessageTooLargeError) Error() string { + return MessageSizeTooLarge.Error() +} diff --git a/vendor/github.com/segmentio/kafka-go/fetch.go b/vendor/github.com/segmentio/kafka-go/fetch.go index 326594dcc..b742fef20 100644 --- a/vendor/github.com/segmentio/kafka-go/fetch.go +++ b/vendor/github.com/segmentio/kafka-go/fetch.go @@ -1,86 +1,84 @@ package kafka -import "bufio" - -type fetchRequestV1 struct { +type fetchRequestV2 struct { ReplicaID int32 MaxWaitTime int32 MinBytes int32 - Topics []fetchRequestTopicV1 + Topics []fetchRequestTopicV2 } -func (r fetchRequestV1) size() int32 { +func (r fetchRequestV2) size() int32 { return 4 + 4 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) } -func (r fetchRequestV1) writeTo(w *bufio.Writer) { - writeInt32(w, r.ReplicaID) - writeInt32(w, r.MaxWaitTime) - writeInt32(w, r.MinBytes) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r fetchRequestV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeInt32(r.MaxWaitTime) + wb.writeInt32(r.MinBytes) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } -type fetchRequestTopicV1 struct { +type fetchRequestTopicV2 struct { TopicName string - Partitions []fetchRequestPartitionV1 + Partitions []fetchRequestPartitionV2 } -func (t fetchRequestTopicV1) size() int32 { +func (t fetchRequestTopicV2) size() int32 { return sizeofString(t.TopicName) + sizeofArray(len(t.Partitions), 
func(i int) int32 { return t.Partitions[i].size() }) } -func (t fetchRequestTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t fetchRequestTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } -type fetchRequestPartitionV1 struct { +type fetchRequestPartitionV2 struct { Partition int32 FetchOffset int64 MaxBytes int32 } -func (p fetchRequestPartitionV1) size() int32 { +func (p fetchRequestPartitionV2) size() int32 { return 4 + 8 + 4 } -func (p fetchRequestPartitionV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt64(w, p.FetchOffset) - writeInt32(w, p.MaxBytes) +func (p fetchRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.FetchOffset) + wb.writeInt32(p.MaxBytes) } -type fetchResponseV1 struct { +type fetchResponseV2 struct { ThrottleTime int32 - Topics []fetchResponseTopicV1 + Topics []fetchResponseTopicV2 } -func (r fetchResponseV1) size() int32 { +func (r fetchResponseV2) size() int32 { return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) } -func (r fetchResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, r.ThrottleTime) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r fetchResponseV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ThrottleTime) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } -type fetchResponseTopicV1 struct { +type fetchResponseTopicV2 struct { TopicName string - Partitions []fetchResponsePartitionV1 + Partitions []fetchResponsePartitionV2 } -func (t fetchResponseTopicV1) size() int32 { +func (t fetchResponseTopicV2) size() int32 { return sizeofString(t.TopicName) + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t fetchResponseTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t fetchResponseTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } -type fetchResponsePartitionV1 struct { +type fetchResponsePartitionV2 struct { Partition int32 ErrorCode int16 HighwaterMarkOffset int64 @@ -88,14 +86,14 @@ type fetchResponsePartitionV1 struct { MessageSet messageSet } -func (p fetchResponsePartitionV1) size() int32 { +func (p fetchResponsePartitionV2) size() int32 { return 4 + 2 + 8 + 4 + p.MessageSet.size() } -func (p fetchResponsePartitionV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.HighwaterMarkOffset) - writeInt32(w, p.MessageSetSize) - p.MessageSet.writeTo(w) +func (p fetchResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.HighwaterMarkOffset) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) } diff --git a/vendor/github.com/segmentio/kafka-go/findcoordinator.go b/vendor/github.com/segmentio/kafka-go/findcoordinator.go index 750ba7c87..e20e7a3f7 100644 --- a/vendor/github.com/segmentio/kafka-go/findcoordinator.go +++ b/vendor/github.com/segmentio/kafka-go/findcoordinator.go @@ -4,28 +4,24 @@ import ( "bufio" ) -// FindCoordinatorRequestV1 requests the coordinator for the specified group or transaction +// FindCoordinatorRequestV0 requests the 
coordinator for the specified group or transaction // // See http://kafka.apache.org/protocol.html#The_Messages_FindCoordinator -type findCoordinatorRequestV1 struct { +type findCoordinatorRequestV0 struct { // CoordinatorKey holds id to use for finding the coordinator (for groups, this is // the groupId, for transactional producers, this is the transactional id) CoordinatorKey string - - // CoordinatorType indicates type of coordinator to find (0 = group, 1 = transaction) - CoordinatorType int8 } -func (t findCoordinatorRequestV1) size() int32 { - return sizeofString(t.CoordinatorKey) + sizeof(t.CoordinatorType) +func (t findCoordinatorRequestV0) size() int32 { + return sizeofString(t.CoordinatorKey) } -func (t findCoordinatorRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.CoordinatorKey) - writeInt8(w, t.CoordinatorType) +func (t findCoordinatorRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.CoordinatorKey) } -type findCoordinatorResponseCoordinatorV1 struct { +type findCoordinatorResponseCoordinatorV0 struct { // NodeID holds the broker id. NodeID int32 @@ -36,19 +32,19 @@ type findCoordinatorResponseCoordinatorV1 struct { Port int32 } -func (t findCoordinatorResponseCoordinatorV1) size() int32 { +func (t findCoordinatorResponseCoordinatorV0) size() int32 { return sizeofInt32(t.NodeID) + sizeofString(t.Host) + sizeofInt32(t.Port) } -func (t findCoordinatorResponseCoordinatorV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.NodeID) - writeString(w, t.Host) - writeInt32(w, t.Port) +func (t findCoordinatorResponseCoordinatorV0) writeTo(wb *writeBuffer) { + wb.writeInt32(t.NodeID) + wb.writeString(t.Host) + wb.writeInt32(t.Port) } -func (t *findCoordinatorResponseCoordinatorV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *findCoordinatorResponseCoordinatorV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readInt32(r, size, &t.NodeID); err != nil { return } @@ -61,44 +57,26 @@ func (t *findCoordinatorResponseCoordinatorV1) readFrom(r *bufio.Reader, size in return } -type findCoordinatorResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type findCoordinatorResponseV0 struct { // ErrorCode holds response error code ErrorCode int16 - // ErrorMessage holds response error message - ErrorMessage string - // Coordinator holds host and port information for the coordinator - Coordinator findCoordinatorResponseCoordinatorV1 + Coordinator findCoordinatorResponseCoordinatorV0 } -func (t findCoordinatorResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) + - sizeofString(t.ErrorMessage) + +func (t findCoordinatorResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + t.Coordinator.size() } -func (t findCoordinatorResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) - writeString(w, t.ErrorMessage) - t.Coordinator.writeTo(w) +func (t findCoordinatorResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + t.Coordinator.writeTo(wb) } -func (t *findCoordinatorResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } - if remain, err = readString(r, remain, &t.ErrorMessage); err != 
nil { +func (t *findCoordinatorResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { return } if remain, err = (&t.Coordinator).readFrom(r, remain); err != nil { diff --git a/vendor/github.com/segmentio/kafka-go/groupbalancer.go b/vendor/github.com/segmentio/kafka-go/groupbalancer.go new file mode 100644 index 000000000..7e46cc7d4 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/groupbalancer.go @@ -0,0 +1,187 @@ +package kafka + +import "sort" + +// GroupMember describes a single participant in a consumer group. +type GroupMember struct { + // ID is the unique ID for this member as taken from the JoinGroup response. + ID string + + // Topics is a list of topics that this member is consuming. + Topics []string + + // UserData contains any information that the GroupBalancer sent to the + // consumer group coordinator. + UserData []byte +} + +// GroupMemberAssignments holds MemberID => topic => partitions +type GroupMemberAssignments map[string]map[string][]int + +// GroupBalancer encapsulates the client side rebalancing logic +type GroupBalancer interface { + // ProtocolName of the GroupBalancer + ProtocolName() string + + // UserData provides the GroupBalancer an opportunity to embed custom + // UserData into the metadata. + // + // Will be used by JoinGroup to begin the consumer group handshake. + // + // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest + UserData() ([]byte, error) + + // DefineMemberships returns which members will be consuming + // which topic partitions + AssignGroups(members []GroupMember, partitions []Partition) GroupMemberAssignments +} + +// RangeGroupBalancer groups consumers by partition +// +// Example: 5 partitions, 2 consumers +// C0: [0, 1, 2] +// C1: [3, 4] +// +// Example: 6 partitions, 3 consumers +// C0: [0, 1] +// C1: [2, 3] +// C2: [4, 5] +// +type RangeGroupBalancer struct{} + +func (r RangeGroupBalancer) ProtocolName() string { + return "range" +} + +func (r RangeGroupBalancer) UserData() ([]byte, error) { + return nil, nil +} + +func (r RangeGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { + groupAssignments := GroupMemberAssignments{} + membersByTopic := findMembersByTopic(members) + + for topic, members := range membersByTopic { + partitions := findPartitions(topic, topicPartitions) + partitionCount := len(partitions) + memberCount := len(members) + + for memberIndex, member := range members { + assignmentsByTopic, ok := groupAssignments[member.ID] + if !ok { + assignmentsByTopic = map[string][]int{} + groupAssignments[member.ID] = assignmentsByTopic + } + + minIndex := memberIndex * partitionCount / memberCount + maxIndex := (memberIndex + 1) * partitionCount / memberCount + + for partitionIndex, partition := range partitions { + if partitionIndex >= minIndex && partitionIndex < maxIndex { + assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) + } + } + } + } + + return groupAssignments +} + +// RoundrobinGroupBalancer divides partitions evenly among consumers +// +// Example: 5 partitions, 2 consumers +// C0: [0, 2, 4] +// C1: [1, 3] +// +// Example: 6 partitions, 3 consumers +// C0: [0, 3] +// C1: [1, 4] +// C2: [2, 5] +// +type RoundRobinGroupBalancer struct{} + +func (r RoundRobinGroupBalancer) ProtocolName() string { + return "roundrobin" +} + +func (r RoundRobinGroupBalancer) UserData() ([]byte, 
error) { + return nil, nil +} + +func (r RoundRobinGroupBalancer) AssignGroups(members []GroupMember, topicPartitions []Partition) GroupMemberAssignments { + groupAssignments := GroupMemberAssignments{} + membersByTopic := findMembersByTopic(members) + for topic, members := range membersByTopic { + partitionIDs := findPartitions(topic, topicPartitions) + memberCount := len(members) + + for memberIndex, member := range members { + assignmentsByTopic, ok := groupAssignments[member.ID] + if !ok { + assignmentsByTopic = map[string][]int{} + groupAssignments[member.ID] = assignmentsByTopic + } + + for partitionIndex, partition := range partitionIDs { + if (partitionIndex % memberCount) == memberIndex { + assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) + } + } + } + } + + return groupAssignments +} + +// findPartitions extracts the partition ids associated with the topic from the +// list of Partitions provided +func findPartitions(topic string, partitions []Partition) []int { + var ids []int + for _, partition := range partitions { + if partition.Topic == topic { + ids = append(ids, partition.ID) + } + } + return ids +} + +// findMembersByTopic groups the memberGroupMetadata by topic +func findMembersByTopic(members []GroupMember) map[string][]GroupMember { + membersByTopic := map[string][]GroupMember{} + for _, member := range members { + for _, topic := range member.Topics { + membersByTopic[topic] = append(membersByTopic[topic], member) + } + } + + // normalize ordering of members to enabling grouping across topics by partitions + // + // Want: + // C0 [T0/P0, T1/P0] + // C1 [T0/P1, T1/P1] + // + // Not: + // C0 [T0/P0, T1/P1] + // C1 [T0/P1, T1/P0] + // + // Even though the later is still round robin, the partitions are crossed + // + for _, members := range membersByTopic { + sort.Slice(members, func(i, j int) bool { + return members[i].ID < members[j].ID + }) + } + + return membersByTopic +} + +// findGroupBalancer returns the GroupBalancer with the specified protocolName +// from the slice provided +func findGroupBalancer(protocolName string, balancers []GroupBalancer) (GroupBalancer, bool) { + for _, balancer := range balancers { + if balancer.ProtocolName() == protocolName { + return balancer, true + } + } + return nil, false +} diff --git a/vendor/github.com/segmentio/kafka-go/heartbeat.go b/vendor/github.com/segmentio/kafka-go/heartbeat.go index 5a683adcf..816ce7e7c 100644 --- a/vendor/github.com/segmentio/kafka-go/heartbeat.go +++ b/vendor/github.com/segmentio/kafka-go/heartbeat.go @@ -2,7 +2,7 @@ package kafka import "bufio" -type heartbeatRequestV1 struct { +type heartbeatRequestV0 struct { // GroupID holds the unique group identifier GroupID string @@ -13,43 +13,33 @@ type heartbeatRequestV1 struct { MemberID string } -func (t heartbeatRequestV1) size() int32 { +func (t heartbeatRequestV0) size() int32 { return sizeofString(t.GroupID) + sizeofInt32(t.GenerationID) + sizeofString(t.MemberID) } -func (t heartbeatRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) +func (t heartbeatRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) } -type heartbeatResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type heartbeatResponseV0 struct { // ErrorCode 
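For illustration, a minimal standalone sketch (not part of the vendored sources) of how the two new balancers split partitions. No broker is involved; the member IDs, topic name and partition IDs are made up, and the expected assignments follow from the AssignGroups implementations above.

package main

import (
    "fmt"

    kafka "github.com/segmentio/kafka-go"
)

func main() {
    members := []kafka.GroupMember{
        {ID: "consumer-a", Topics: []string{"events"}},
        {ID: "consumer-b", Topics: []string{"events"}},
        {ID: "consumer-c", Topics: []string{"events"}},
    }
    partitions := []kafka.Partition{
        {Topic: "events", ID: 0}, {Topic: "events", ID: 1},
        {Topic: "events", ID: 2}, {Topic: "events", ID: 3},
        {Topic: "events", ID: 4}, {Topic: "events", ID: 5},
    }

    // Range keeps contiguous runs together:
    // consumer-a -> [0 1], consumer-b -> [2 3], consumer-c -> [4 5]
    fmt.Println(kafka.RangeGroupBalancer{}.AssignGroups(members, partitions))

    // Round robin interleaves partitions:
    // consumer-a -> [0 3], consumer-b -> [1 4], consumer-c -> [2 5]
    fmt.Println(kafka.RoundRobinGroupBalancer{}.AssignGroups(members, partitions))
}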
holds response error code ErrorCode int16 } -func (t heartbeatResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) +func (t heartbeatResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) } -func (t heartbeatResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) +func (t heartbeatResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) } -func (t *heartbeatResponseV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt32(r, sz, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { +func (t *heartbeatResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { return } return diff --git a/vendor/github.com/segmentio/kafka-go/joingroup.go b/vendor/github.com/segmentio/kafka-go/joingroup.go index 2e1e06006..6ca959698 100644 --- a/vendor/github.com/segmentio/kafka-go/joingroup.go +++ b/vendor/github.com/segmentio/kafka-go/joingroup.go @@ -24,17 +24,15 @@ func (t groupMetadata) size() int32 { sizeofBytes(t.UserData) } -func (t groupMetadata) writeTo(w *bufio.Writer) { - writeInt16(w, t.Version) - writeStringArray(w, t.Topics) - writeBytes(w, t.UserData) +func (t groupMetadata) writeTo(wb *writeBuffer) { + wb.writeInt16(t.Version) + wb.writeStringArray(t.Topics) + wb.writeBytes(t.UserData) } func (t groupMetadata) bytes() []byte { buf := bytes.NewBuffer(nil) - w := bufio.NewWriter(buf) - t.writeTo(w) - w.Flush() + t.writeTo(&writeBuffer{w: buf}) return buf.Bytes() } @@ -51,22 +49,22 @@ func (t *groupMetadata) readFrom(r *bufio.Reader, size int) (remain int, err err return } -type joinGroupRequestGroupProtocolV2 struct { +type joinGroupRequestGroupProtocolV1 struct { ProtocolName string ProtocolMetadata []byte } -func (t joinGroupRequestGroupProtocolV2) size() int32 { +func (t joinGroupRequestGroupProtocolV1) size() int32 { return sizeofString(t.ProtocolName) + sizeofBytes(t.ProtocolMetadata) } -func (t joinGroupRequestGroupProtocolV2) writeTo(w *bufio.Writer) { - writeString(w, t.ProtocolName) - writeBytes(w, t.ProtocolMetadata) +func (t joinGroupRequestGroupProtocolV1) writeTo(wb *writeBuffer) { + wb.writeString(t.ProtocolName) + wb.writeBytes(t.ProtocolMetadata) } -type joinGroupRequestV2 struct { +type joinGroupRequestV1 struct { // GroupID holds the unique group identifier GroupID string @@ -86,10 +84,10 @@ type joinGroupRequestV2 struct { ProtocolType string // GroupProtocols holds the list of protocols that the member supports - GroupProtocols []joinGroupRequestGroupProtocolV2 + GroupProtocols []joinGroupRequestGroupProtocolV1 } -func (t joinGroupRequestV2) size() int32 { +func (t joinGroupRequestV1) size() int32 { return sizeofString(t.GroupID) + sizeofInt32(t.SessionTimeout) + sizeofInt32(t.RebalanceTimeout) + @@ -98,32 +96,32 @@ func (t joinGroupRequestV2) size() int32 { sizeofArray(len(t.GroupProtocols), func(i int) int32 { return t.GroupProtocols[i].size() }) } -func (t joinGroupRequestV2) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.SessionTimeout) - writeInt32(w, t.RebalanceTimeout) - writeString(w, t.MemberID) - writeString(w, t.ProtocolType) - writeArray(w, len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(w) }) +func (t joinGroupRequestV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.SessionTimeout) + wb.writeInt32(t.RebalanceTimeout) + 
wb.writeString(t.MemberID) + wb.writeString(t.ProtocolType) + wb.writeArray(len(t.GroupProtocols), func(i int) { t.GroupProtocols[i].writeTo(wb) }) } -type joinGroupResponseMemberV2 struct { +type joinGroupResponseMemberV1 struct { // MemberID assigned by the group coordinator MemberID string MemberMetadata []byte } -func (t joinGroupResponseMemberV2) size() int32 { +func (t joinGroupResponseMemberV1) size() int32 { return sizeofString(t.MemberID) + sizeofBytes(t.MemberMetadata) } -func (t joinGroupResponseMemberV2) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeBytes(w, t.MemberMetadata) +func (t joinGroupResponseMemberV1) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeBytes(t.MemberMetadata) } -func (t *joinGroupResponseMemberV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *joinGroupResponseMemberV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.MemberID); err != nil { return } @@ -133,12 +131,7 @@ func (t *joinGroupResponseMemberV2) readFrom(r *bufio.Reader, size int) (remain return } -type joinGroupResponseV2 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type joinGroupResponseV1 struct { // ErrorCode holds response error code ErrorCode int16 @@ -153,12 +146,11 @@ type joinGroupResponseV2 struct { // MemberID assigned by the group coordinator MemberID string - Members []joinGroupResponseMemberV2 + Members []joinGroupResponseMemberV1 } -func (t joinGroupResponseV2) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) + +func (t joinGroupResponseV1) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofInt32(t.GenerationID) + sizeofString(t.GroupProtocol) + sizeofString(t.LeaderID) + @@ -166,21 +158,17 @@ func (t joinGroupResponseV2) size() int32 { sizeofArray(len(t.MemberID), func(i int) int32 { return t.Members[i].size() }) } -func (t joinGroupResponseV2) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) - writeInt32(w, t.GenerationID) - writeString(w, t.GroupProtocol) - writeString(w, t.LeaderID) - writeString(w, t.MemberID) - writeArray(w, len(t.Members), func(i int) { t.Members[i].writeTo(w) }) +func (t joinGroupResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeInt32(t.GenerationID) + wb.writeString(t.GroupProtocol) + wb.writeString(t.LeaderID) + wb.writeString(t.MemberID) + wb.writeArray(len(t.Members), func(i int) { t.Members[i].writeTo(wb) }) } -func (t *joinGroupResponseV2) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { +func (t *joinGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { + if remain, err = readInt16(r, size, &t.ErrorCode); err != nil { return } if remain, err = readInt32(r, remain, &t.GenerationID); err != nil { @@ -197,7 +185,7 @@ func (t *joinGroupResponseV2) readFrom(r *bufio.Reader, size int) (remain int, e } fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - var item joinGroupResponseMemberV2 + var item joinGroupResponseMemberV1 if fnRemain, fnErr = (&item).readFrom(r, size); fnErr != nil { return } diff --git a/vendor/github.com/segmentio/kafka-go/leavegroup.go 
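To show how the join-group pieces fit together, a short in-package sketch (assumed usage, not taken from the vendored code): the balancer supplies the protocol name, and the serialized groupMetadata travels as the opaque ProtocolMetadata bytes. The topic name is made up.

func exampleJoinProtocol() joinGroupRequestGroupProtocolV1 {
    meta := groupMetadata{
        Version: 1,
        Topics:  []string{"events"},
    }
    return joinGroupRequestGroupProtocolV1{
        ProtocolName:     RangeGroupBalancer{}.ProtocolName(), // "range"
        ProtocolMetadata: meta.bytes(),                        // serialized via writeBuffer
    }
}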
b/vendor/github.com/segmentio/kafka-go/leavegroup.go index e3625d473..b77036154 100644 --- a/vendor/github.com/segmentio/kafka-go/leavegroup.go +++ b/vendor/github.com/segmentio/kafka-go/leavegroup.go @@ -2,7 +2,7 @@ package kafka import "bufio" -type leaveGroupRequestV1 struct { +type leaveGroupRequestV0 struct { // GroupID holds the unique group identifier GroupID string @@ -11,42 +11,29 @@ type leaveGroupRequestV1 struct { MemberID string } -func (t leaveGroupRequestV1) size() int32 { - return sizeofString(t.GroupID) + - sizeofString(t.MemberID) +func (t leaveGroupRequestV0) size() int32 { + return sizeofString(t.GroupID) + sizeofString(t.MemberID) } -func (t leaveGroupRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeString(w, t.MemberID) +func (t leaveGroupRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.MemberID) } -type leaveGroupResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type leaveGroupResponseV0 struct { // ErrorCode holds response error code ErrorCode int16 } -func (t leaveGroupResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) +func (t leaveGroupResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) } -func (t leaveGroupResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) +func (t leaveGroupResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) } -func (t *leaveGroupResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { - return - } +func (t *leaveGroupResponseV0) readFrom(r *bufio.Reader, size int) (remain int, err error) { + remain, err = readInt16(r, size, &t.ErrorCode) return } diff --git a/vendor/github.com/segmentio/kafka-go/listgroups.go b/vendor/github.com/segmentio/kafka-go/listgroups.go index a369f7d09..9f5e4438d 100644 --- a/vendor/github.com/segmentio/kafka-go/listgroups.go +++ b/vendor/github.com/segmentio/kafka-go/listgroups.go @@ -11,7 +11,7 @@ func (t listGroupsRequestV1) size() int32 { return 0 } -func (t listGroupsRequestV1) writeTo(w *bufio.Writer) { +func (t listGroupsRequestV1) writeTo(wb *writeBuffer) { } type ListGroupsResponseGroupV1 struct { @@ -21,13 +21,12 @@ type ListGroupsResponseGroupV1 struct { } func (t ListGroupsResponseGroupV1) size() int32 { - return sizeofString(t.GroupID) + - sizeofString(t.ProtocolType) + return sizeofString(t.GroupID) + sizeofString(t.ProtocolType) } -func (t ListGroupsResponseGroupV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeString(w, t.ProtocolType) +func (t ListGroupsResponseGroupV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeString(t.ProtocolType) } func (t *ListGroupsResponseGroupV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { @@ -57,10 +56,10 @@ func (t listGroupsResponseV1) size() int32 { sizeofArray(len(t.Groups), func(i int) int32 { return t.Groups[i].size() }) } -func (t listGroupsResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) - writeArray(w, len(t.Groups), func(i int) { t.Groups[i].writeTo(w) }) +func (t listGroupsResponseV1) writeTo(wb *writeBuffer) { + wb.writeInt32(t.ThrottleTimeMS) + 
wb.writeInt16(t.ErrorCode) + wb.writeArray(len(t.Groups), func(i int) { t.Groups[i].writeTo(wb) }) } func (t *listGroupsResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { diff --git a/vendor/github.com/segmentio/kafka-go/listoffset.go b/vendor/github.com/segmentio/kafka-go/listoffset.go index 12903197c..7988b4c87 100644 --- a/vendor/github.com/segmentio/kafka-go/listoffset.go +++ b/vendor/github.com/segmentio/kafka-go/listoffset.go @@ -11,9 +11,9 @@ func (r listOffsetRequestV1) size() int32 { return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) } -func (r listOffsetRequestV1) writeTo(w *bufio.Writer) { - writeInt32(w, r.ReplicaID) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r listOffsetRequestV1) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ReplicaID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } type listOffsetRequestTopicV1 struct { @@ -26,9 +26,9 @@ func (t listOffsetRequestTopicV1) size() int32 { sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t listOffsetRequestTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t listOffsetRequestTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } type listOffsetRequestPartitionV1 struct { @@ -40,9 +40,9 @@ func (p listOffsetRequestPartitionV1) size() int32 { return 4 + 8 } -func (p listOffsetRequestPartitionV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt64(w, p.Time) +func (p listOffsetRequestPartitionV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt64(p.Time) } type listOffsetResponseV1 []listOffsetResponseTopicV1 @@ -51,8 +51,8 @@ func (r listOffsetResponseV1) size() int32 { return sizeofArray(len(r), func(i int) int32 { return r[i].size() }) } -func (r listOffsetResponseV1) writeTo(w *bufio.Writer) { - writeArray(w, len(r), func(i int) { r[i].writeTo(w) }) +func (r listOffsetResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r), func(i int) { r[i].writeTo(wb) }) } type listOffsetResponseTopicV1 struct { @@ -65,9 +65,9 @@ func (t listOffsetResponseTopicV1) size() int32 { sizeofArray(len(t.PartitionOffsets), func(i int) int32 { return t.PartitionOffsets[i].size() }) } -func (t listOffsetResponseTopicV1) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(w) }) +func (t listOffsetResponseTopicV1) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.PartitionOffsets), func(i int) { t.PartitionOffsets[i].writeTo(wb) }) } type partitionOffsetV1 struct { @@ -81,11 +81,11 @@ func (p partitionOffsetV1) size() int32 { return 4 + 2 + 8 + 8 } -func (p partitionOffsetV1) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.Timestamp) - writeInt64(w, p.Offset) +func (p partitionOffsetV1) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.Offset) } func (p *partitionOffsetV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { diff --git a/vendor/github.com/segmentio/kafka-go/message.go b/vendor/github.com/segmentio/kafka-go/message.go index 96e9afbee..aae06b5a9 100644 --- 
a/vendor/github.com/segmentio/kafka-go/message.go +++ b/vendor/github.com/segmentio/kafka-go/message.go @@ -2,6 +2,9 @@ package kafka import ( "bufio" + "bytes" + "fmt" + "io" "time" ) @@ -15,29 +18,23 @@ type Message struct { Offset int64 Key []byte Value []byte + Headers []Header // If not set at the creation, Time will be automatically set when // writing the message. Time time.Time } -func (msg Message) item() messageSetItem { - item := messageSetItem{ - Offset: msg.Offset, - Message: msg.message(), - } - item.MessageSize = item.Message.size() - return item -} - -func (msg Message) message() message { +func (msg Message) message(cw *crc32Writer) message { m := message{ MagicByte: 1, Key: msg.Key, Value: msg.Value, Timestamp: timestamp(msg.Time), } - m.CRC = m.crc32() + if cw != nil { + m.CRC = m.crc32(cw) + } return m } @@ -50,8 +47,16 @@ type message struct { Value []byte } -func (m message) crc32() int32 { - return int32(crc32OfMessage(m.MagicByte, m.Attributes, m.Timestamp, m.Key, m.Value)) +func (m message) crc32(cw *crc32Writer) int32 { + cw.crc32 = 0 + cw.writeInt8(m.MagicByte) + cw.writeInt8(m.Attributes) + if m.MagicByte != 0 { + cw.writeInt64(m.Timestamp) + } + cw.writeBytes(m.Key) + cw.writeBytes(m.Value) + return int32(cw.crc32) } func (m message) size() int32 { @@ -62,15 +67,15 @@ func (m message) size() int32 { return size } -func (m message) writeTo(w *bufio.Writer) { - writeInt32(w, m.CRC) - writeInt8(w, m.MagicByte) - writeInt8(w, m.Attributes) +func (m message) writeTo(wb *writeBuffer) { + wb.writeInt32(m.CRC) + wb.writeInt8(m.MagicByte) + wb.writeInt8(m.Attributes) if m.MagicByte != 0 { - writeInt64(w, m.Timestamp) + wb.writeInt64(m.Timestamp) } - writeBytes(w, m.Key) - writeBytes(w, m.Value) + wb.writeBytes(m.Key) + wb.writeBytes(m.Value) } type messageSetItem struct { @@ -83,10 +88,10 @@ func (m messageSetItem) size() int32 { return 8 + 4 + m.Message.size() } -func (m messageSetItem) writeTo(w *bufio.Writer) { - writeInt64(w, m.Offset) - writeInt32(w, m.MessageSize) - m.Message.writeTo(w) +func (m messageSetItem) writeTo(wb *writeBuffer) { + wb.writeInt64(m.Offset) + wb.writeInt32(m.MessageSize) + m.Message.writeTo(wb) } type messageSet []messageSetItem @@ -98,8 +103,485 @@ func (s messageSet) size() (size int32) { return } -func (s messageSet) writeTo(w *bufio.Writer) { +func (s messageSet) writeTo(wb *writeBuffer) { for _, m := range s { - m.writeTo(w) + m.writeTo(wb) + } +} + +type messageSetReader struct { + empty bool + version int + v1 messageSetReaderV1 + v2 messageSetReaderV2 +} + +func (r *messageSetReader) readMessage(min int64, + key func(*bufio.Reader, int, int) (int, error), + val func(*bufio.Reader, int, int) (int, error), +) (offset int64, timestamp int64, headers []Header, err error) { + if r.empty { + return 0, 0, nil, RequestTimedOut + } + switch r.version { + case 1: + return r.v1.readMessage(min, key, val) + case 2: + return r.v2.readMessage(min, key, val) + default: + panic("Invalid messageSetReader - unknown message reader version") + } +} + +func (r *messageSetReader) remaining() (remain int) { + if r.empty { + return 0 + } + switch r.version { + case 1: + return r.v1.remaining() + case 2: + return r.v2.remaining() + default: + panic("Invalid messageSetReader - unknown message reader version") + } +} + +func (r *messageSetReader) discard() (err error) { + if r.empty { + return nil + } + switch r.version { + case 1: + return r.v1.discard() + case 2: + return r.v2.discard() + default: + panic("Invalid messageSetReader - unknown message reader 
version") + } +} + +type messageSetReaderV1 struct { + *readerStack +} + +type readerStack struct { + reader *bufio.Reader + remain int + base int64 + parent *readerStack +} + +func newMessageSetReader(reader *bufio.Reader, remain int) (*messageSetReader, error) { + headerLength := 8 + 4 + 4 + 1 // offset + messageSize + crc + magicByte + + if headerLength > remain { + return nil, errShortRead + } + + b, err := reader.Peek(headerLength) + if err != nil { + return nil, err + } + var version int8 = int8(b[headerLength-1]) + + switch version { + case 0, 1: + return &messageSetReader{ + version: 1, + v1: messageSetReaderV1{&readerStack{ + reader: reader, + remain: remain, + }}}, nil + case 2: + mr := &messageSetReader{ + version: 2, + v2: messageSetReaderV2{ + readerStack: &readerStack{ + reader: reader, + remain: remain, + }, + messageCount: 0, + }} + return mr, nil + default: + return nil, fmt.Errorf("unsupported message version %d found in fetch response", version) + } +} + +func (r *messageSetReaderV1) readMessage(min int64, + key func(*bufio.Reader, int, int) (int, error), + val func(*bufio.Reader, int, int) (int, error), +) (offset int64, timestamp int64, headers []Header, err error) { + for r.readerStack != nil { + if r.remain == 0 { + r.readerStack = r.parent + continue + } + + var attributes int8 + if offset, attributes, timestamp, r.remain, err = readMessageHeader(r.reader, r.remain); err != nil { + return + } + + // if the message is compressed, decompress it and push a new reader + // onto the stack. + code := attributes & compressionCodecMask + if code != 0 { + var codec CompressionCodec + if codec, err = resolveCodec(code); err != nil { + return + } + + // discard next four bytes...will be -1 to indicate null key + if r.remain, err = discardN(r.reader, r.remain, 4); err != nil { + return + } + + // read and decompress the contained message set. + var decompressed bytes.Buffer + + if r.remain, err = readBytesWith(r.reader, r.remain, func(r *bufio.Reader, sz, n int) (remain int, err error) { + // x4 as a guess that the average compression ratio is near 75% + decompressed.Grow(4 * n) + + l := io.LimitedReader{R: r, N: int64(n)} + d := codec.NewReader(&l) + + _, err = decompressed.ReadFrom(d) + remain = sz - (n - int(l.N)) + + d.Close() + return + }); err != nil { + return + } + + // the compressed message's offset will be equal to the offset of + // the last message in the set. within the compressed set, the + // offsets will be relative, so we have to scan through them to + // get the base offset. for example, if there are four compressed + // messages at offsets 10-13, then the container message will have + // offset 13 and the contained messages will be 0,1,2,3. the base + // offset for the container, then is 13-3=10. + if offset, err = extractOffset(offset, decompressed.Bytes()); err != nil { + return + } + + r.readerStack = &readerStack{ + // Allocate a buffer of size 0, which gets capped at 16 bytes + // by the bufio package. We are already reading buffered data + // here, no need to reserve another 4KB buffer. + reader: bufio.NewReaderSize(&decompressed, 0), + remain: decompressed.Len(), + base: offset, + parent: r.readerStack, + } + continue + } + + // adjust the offset in case we're reading compressed messages. the + // base will be zero otherwise. + offset += r.base + + // When the messages are compressed kafka may return messages at an + // earlier offset than the one that was requested, it's the client's + // responsibility to ignore those. 
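A rough in-package sketch (assumed usage, not part of the vendored code) of driving the callback-based readMessage above: each callback receives the reader, the remaining byte budget and the encoded length, and must return the updated remaining count; readNewBytes already has that shape. Timestamp and headers are discarded for brevity.

func readOneMessage(msr *messageSetReader) (key, value []byte, offset int64, err error) {
    offset, _, _, err = msr.readMessage(0, // min offset 0: skip nothing
        func(r *bufio.Reader, remain, n int) (int, error) {
            var kerr error
            key, remain, kerr = readNewBytes(r, remain, n)
            return remain, kerr
        },
        func(r *bufio.Reader, remain, n int) (int, error) {
            var verr error
            value, remain, verr = readNewBytes(r, remain, n)
            return remain, verr
        },
    )
    return
}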
+ if offset < min { + if r.remain, err = discardBytes(r.reader, r.remain); err != nil { + return + } + if r.remain, err = discardBytes(r.reader, r.remain); err != nil { + return + } + continue + } + + if r.remain, err = readBytesWith(r.reader, r.remain, key); err != nil { + return + } + r.remain, err = readBytesWith(r.reader, r.remain, val) + return + } + + err = errShortRead + return +} + +func (r *messageSetReaderV1) remaining() (remain int) { + for s := r.readerStack; s != nil; s = s.parent { + remain += s.remain + } + return +} + +func (r *messageSetReaderV1) discard() (err error) { + if r.readerStack == nil { + return } + // rewind up to the top-most reader b/c it's the only one that's doing + // actual i/o. the rest are byte buffers that have been pushed on the stack + // while reading compressed message sets. + for r.parent != nil { + r.readerStack = r.parent + } + r.remain, err = discardN(r.reader, r.remain, r.remain) + return +} + +func extractOffset(base int64, msgSet []byte) (offset int64, err error) { + r, remain := bufio.NewReader(bytes.NewReader(msgSet)), len(msgSet) + for remain > 0 { + if remain, err = readInt64(r, remain, &offset); err != nil { + return + } + var sz int32 + if remain, err = readInt32(r, remain, &sz); err != nil { + return + } + if remain, err = discardN(r, remain, int(sz)); err != nil { + return + } + } + offset = base - offset + return +} + +type Header struct { + Key string + Value []byte +} + +type messageSetHeaderV2 struct { + firstOffset int64 + length int32 + partitionLeaderEpoch int32 + magic int8 + crc int32 + batchAttributes int16 + lastOffsetDelta int32 + firstTimestamp int64 + maxTimestamp int64 + producerId int64 + producerEpoch int16 + firstSequence int32 +} + +type timestampType int8 + +const ( + createTime timestampType = 0 + logAppendTime timestampType = 1 +) + +type transactionType int8 + +const ( + nonTransactional transactionType = 0 + transactional transactionType = 1 +) + +type controlType int8 + +const ( + nonControlMessage controlType = 0 + controlMessage controlType = 1 +) + +func (h *messageSetHeaderV2) compression() int8 { + return int8(h.batchAttributes & 7) +} + +func (h *messageSetHeaderV2) timestampType() timestampType { + return timestampType((h.batchAttributes & (1 << 3)) >> 3) +} + +func (h *messageSetHeaderV2) transactionType() transactionType { + return transactionType((h.batchAttributes & (1 << 4)) >> 4) +} + +func (h *messageSetHeaderV2) controlType() controlType { + return controlType((h.batchAttributes & (1 << 5)) >> 5) +} + +type messageSetReaderV2 struct { + *readerStack + messageCount int + + header messageSetHeaderV2 +} + +func (r *messageSetReaderV2) readHeader() (err error) { + h := &r.header + if r.remain, err = readInt64(r.reader, r.remain, &h.firstOffset); err != nil { + return + } + if r.remain, err = readInt32(r.reader, r.remain, &h.length); err != nil { + return + } + if r.remain, err = readInt32(r.reader, r.remain, &h.partitionLeaderEpoch); err != nil { + return + } + if r.remain, err = readInt8(r.reader, r.remain, &h.magic); err != nil { + return + } + if r.remain, err = readInt32(r.reader, r.remain, &h.crc); err != nil { + return + } + if r.remain, err = readInt16(r.reader, r.remain, &h.batchAttributes); err != nil { + return + } + if r.remain, err = readInt32(r.reader, r.remain, &h.lastOffsetDelta); err != nil { + return + } + if r.remain, err = readInt64(r.reader, r.remain, &h.firstTimestamp); err != nil { + return + } + if r.remain, err = readInt64(r.reader, r.remain, &h.maxTimestamp); err != nil { + 
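The relative-offset arithmetic performed by extractOffset above, as a tiny runnable sketch with made-up numbers:

package main

import "fmt"

func main() {
    // A wrapper message at offset 13 contains four compressed records whose
    // relative offsets are 0..3. extractOffset computes base = 13 - 3 = 10,
    // and readMessage then yields absolute offsets base + relative.
    containerOffset := int64(13)
    lastRelative := int64(3)
    base := containerOffset - lastRelative
    for rel := int64(0); rel <= lastRelative; rel++ {
        fmt.Println(base + rel) // 10, 11, 12, 13
    }
}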
return + } + if r.remain, err = readInt64(r.reader, r.remain, &h.producerId); err != nil { + return + } + if r.remain, err = readInt16(r.reader, r.remain, &h.producerEpoch); err != nil { + return + } + if r.remain, err = readInt32(r.reader, r.remain, &h.firstSequence); err != nil { + return + } + var messageCount int32 + if r.remain, err = readInt32(r.reader, r.remain, &messageCount); err != nil { + return + } + r.messageCount = int(messageCount) + + return nil +} + +func (r *messageSetReaderV2) readMessage(min int64, + key func(*bufio.Reader, int, int) (int, error), + val func(*bufio.Reader, int, int) (int, error), +) (offset int64, timestamp int64, headers []Header, err error) { + + if r.messageCount == 0 { + if r.remain == 0 { + if r.parent != nil { + r.readerStack = r.parent + } + } + + if err = r.readHeader(); err != nil { + return + } + + if code := r.header.compression(); code != 0 { + var codec CompressionCodec + if codec, err = resolveCodec(code); err != nil { + return + } + + var batchRemain = int(r.header.length - 49) + if batchRemain > r.remain { + err = errShortRead + return + } + + var decompressed bytes.Buffer + decompressed.Grow(4 * batchRemain) + + l := io.LimitedReader{R: r.reader, N: int64(batchRemain)} + d := codec.NewReader(&l) + + _, err = decompressed.ReadFrom(d) + r.remain = r.remain - (batchRemain - int(l.N)) + d.Close() + + if err != nil { + return + } + + r.readerStack = &readerStack{ + reader: bufio.NewReaderSize(&decompressed, 0), + remain: decompressed.Len(), + base: -1, // base is unused here + parent: r.readerStack, + } + } + } + + var length int64 + if r.remain, err = readVarInt(r.reader, r.remain, &length); err != nil { + return + } + + var attrs int8 + if r.remain, err = readInt8(r.reader, r.remain, &attrs); err != nil { + return + } + var timestampDelta int64 + if r.remain, err = readVarInt(r.reader, r.remain, ×tampDelta); err != nil { + return + } + var offsetDelta int64 + if r.remain, err = readVarInt(r.reader, r.remain, &offsetDelta); err != nil { + return + } + var keyLen int64 + if r.remain, err = readVarInt(r.reader, r.remain, &keyLen); err != nil { + return + } + + if r.remain, err = key(r.reader, r.remain, int(keyLen)); err != nil { + return + } + var valueLen int64 + if r.remain, err = readVarInt(r.reader, r.remain, &valueLen); err != nil { + return + } + + if r.remain, err = val(r.reader, r.remain, int(valueLen)); err != nil { + return + } + + var headerCount int64 + if r.remain, err = readVarInt(r.reader, r.remain, &headerCount); err != nil { + return + } + + headers = make([]Header, headerCount) + + for i := 0; i < int(headerCount); i++ { + if err = r.readMessageHeader(&headers[i]); err != nil { + return + } + } + r.messageCount-- + return r.header.firstOffset + offsetDelta, r.header.firstTimestamp + timestampDelta, headers, nil +} + +func (r *messageSetReaderV2) readMessageHeader(header *Header) (err error) { + var keyLen int64 + if r.remain, err = readVarInt(r.reader, r.remain, &keyLen); err != nil { + return + } + if header.Key, r.remain, err = readNewString(r.reader, r.remain, int(keyLen)); err != nil { + return + } + var valLen int64 + if r.remain, err = readVarInt(r.reader, r.remain, &valLen); err != nil { + return + } + if header.Value, r.remain, err = readNewBytes(r.reader, r.remain, int(valLen)); err != nil { + return + } + return nil +} + +func (r *messageSetReaderV2) remaining() (remain int) { + return r.remain +} + +func (r *messageSetReaderV2) discard() (err error) { + r.remain, err = discardN(r.reader, r.remain, r.remain) + 
return } diff --git a/vendor/github.com/segmentio/kafka-go/metadata.go b/vendor/github.com/segmentio/kafka-go/metadata.go index 51fe98b22..56e12d0e9 100644 --- a/vendor/github.com/segmentio/kafka-go/metadata.go +++ b/vendor/github.com/segmentio/kafka-go/metadata.go @@ -1,68 +1,72 @@ package kafka -import "bufio" +type topicMetadataRequestV1 []string -type topicMetadataRequestV0 []string - -func (r topicMetadataRequestV0) size() int32 { +func (r topicMetadataRequestV1) size() int32 { return sizeofStringArray([]string(r)) } -func (r topicMetadataRequestV0) writeTo(w *bufio.Writer) { - writeStringArray(w, []string(r)) +func (r topicMetadataRequestV1) writeTo(wb *writeBuffer) { + wb.writeStringArray([]string(r)) } -type metadataResponseV0 struct { - Brokers []brokerMetadataV0 - Topics []topicMetadataV0 +type metadataResponseV1 struct { + Brokers []brokerMetadataV1 + ControllerID int32 + Topics []topicMetadataV1 } -func (r metadataResponseV0) size() int32 { +func (r metadataResponseV1) size() int32 { n1 := sizeofArray(len(r.Brokers), func(i int) int32 { return r.Brokers[i].size() }) n2 := sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) - return n1 + n2 + return 4 + n1 + n2 } -func (r metadataResponseV0) writeTo(w *bufio.Writer) { - writeArray(w, len(r.Brokers), func(i int) { r.Brokers[i].writeTo(w) }) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r metadataResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(r.Brokers), func(i int) { r.Brokers[i].writeTo(wb) }) + wb.writeInt32(r.ControllerID) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } -type brokerMetadataV0 struct { +type brokerMetadataV1 struct { NodeID int32 Host string Port int32 + Rack string } -func (b brokerMetadataV0) size() int32 { - return 4 + 4 + sizeofString(b.Host) +func (b brokerMetadataV1) size() int32 { + return 4 + 4 + sizeofString(b.Host) + sizeofString(b.Rack) } -func (b brokerMetadataV0) writeTo(w *bufio.Writer) { - writeInt32(w, b.NodeID) - writeString(w, b.Host) - writeInt32(w, b.Port) +func (b brokerMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt32(b.NodeID) + wb.writeString(b.Host) + wb.writeInt32(b.Port) + wb.writeString(b.Rack) } -type topicMetadataV0 struct { +type topicMetadataV1 struct { TopicErrorCode int16 TopicName string - Partitions []partitionMetadataV0 + Internal bool + Partitions []partitionMetadataV1 } -func (t topicMetadataV0) size() int32 { - return 2 + +func (t topicMetadataV1) size() int32 { + return 2 + 1 + sizeofString(t.TopicName) + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t topicMetadataV0) writeTo(w *bufio.Writer) { - writeInt16(w, t.TopicErrorCode) - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t topicMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(t.TopicErrorCode) + wb.writeString(t.TopicName) + wb.writeBool(t.Internal) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } -type partitionMetadataV0 struct { +type partitionMetadataV1 struct { PartitionErrorCode int16 PartitionID int32 Leader int32 @@ -70,14 +74,14 @@ type partitionMetadataV0 struct { Isr []int32 } -func (p partitionMetadataV0) size() int32 { +func (p partitionMetadataV1) size() int32 { return 2 + 4 + 4 + sizeofInt32Array(p.Replicas) + sizeofInt32Array(p.Isr) } -func (p partitionMetadataV0) writeTo(w *bufio.Writer) { - writeInt16(w, p.PartitionErrorCode) - writeInt32(w, 
p.PartitionID) - writeInt32(w, p.Leader) - writeInt32Array(w, p.Replicas) - writeInt32Array(w, p.Isr) +func (p partitionMetadataV1) writeTo(wb *writeBuffer) { + wb.writeInt16(p.PartitionErrorCode) + wb.writeInt32(p.PartitionID) + wb.writeInt32(p.Leader) + wb.writeInt32Array(p.Replicas) + wb.writeInt32Array(p.Isr) } diff --git a/vendor/github.com/segmentio/kafka-go/offsetcommit.go b/vendor/github.com/segmentio/kafka-go/offsetcommit.go index 4de7814ab..22b25d52a 100644 --- a/vendor/github.com/segmentio/kafka-go/offsetcommit.go +++ b/vendor/github.com/segmentio/kafka-go/offsetcommit.go @@ -2,7 +2,7 @@ package kafka import "bufio" -type offsetCommitRequestV3Partition struct { +type offsetCommitRequestV2Partition struct { // Partition ID Partition int32 @@ -13,37 +13,37 @@ type offsetCommitRequestV3Partition struct { Metadata string } -func (t offsetCommitRequestV3Partition) size() int32 { +func (t offsetCommitRequestV2Partition) size() int32 { return sizeofInt32(t.Partition) + sizeofInt64(t.Offset) + sizeofString(t.Metadata) } -func (t offsetCommitRequestV3Partition) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt64(w, t.Offset) - writeString(w, t.Metadata) +func (t offsetCommitRequestV2Partition) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt64(t.Offset) + wb.writeString(t.Metadata) } -type offsetCommitRequestV3Topic struct { +type offsetCommitRequestV2Topic struct { // Topic name Topic string // Partitions to commit offsets - Partitions []offsetCommitRequestV3Partition + Partitions []offsetCommitRequestV2Partition } -func (t offsetCommitRequestV3Topic) size() int32 { +func (t offsetCommitRequestV2Topic) size() int32 { return sizeofString(t.Topic) + sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t offsetCommitRequestV3Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t offsetCommitRequestV2Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } -type offsetCommitRequestV3 struct { +type offsetCommitRequestV2 struct { // GroupID holds the unique group identifier GroupID string @@ -57,10 +57,10 @@ type offsetCommitRequestV3 struct { RetentionTime int64 // Topics to commit offsets - Topics []offsetCommitRequestV3Topic + Topics []offsetCommitRequestV2Topic } -func (t offsetCommitRequestV3) size() int32 { +func (t offsetCommitRequestV2) size() int32 { return sizeofString(t.GroupID) + sizeofInt32(t.GenerationID) + sizeofString(t.MemberID) + @@ -68,32 +68,32 @@ func (t offsetCommitRequestV3) size() int32 { sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) } -func (t offsetCommitRequestV3) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) - writeInt64(w, t.RetentionTime) - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) +func (t offsetCommitRequestV2) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) + wb.writeInt64(t.RetentionTime) + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) } -type offsetCommitResponseV3PartitionResponse struct { +type offsetCommitResponseV2PartitionResponse struct { Partition int32 // ErrorCode holds response error code ErrorCode int16 } -func (t offsetCommitResponseV3PartitionResponse) size() int32 { +func (t 
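An in-package sketch (assumed usage; group, member and topic names are made up) of filling in the v2 offset-commit request defined above:

func exampleOffsetCommit() offsetCommitRequestV2 {
    return offsetCommitRequestV2{
        GroupID:       "my-group",
        GenerationID:  1,
        MemberID:      "member-1",
        RetentionTime: -1, // -1 asks the broker to apply its configured offset retention
        Topics: []offsetCommitRequestV2Topic{{
            Topic: "events",
            Partitions: []offsetCommitRequestV2Partition{{
                Partition: 0,
                Offset:    42,
                Metadata:  "",
            }},
        }},
    }
}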
offsetCommitResponseV2PartitionResponse) size() int32 { return sizeofInt32(t.Partition) + sizeofInt16(t.ErrorCode) } -func (t offsetCommitResponseV3PartitionResponse) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt16(w, t.ErrorCode) +func (t offsetCommitResponseV2PartitionResponse) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt16(t.ErrorCode) } -func (t *offsetCommitResponseV3PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *offsetCommitResponseV2PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readInt32(r, size, &t.Partition); err != nil { return } @@ -103,28 +103,28 @@ func (t *offsetCommitResponseV3PartitionResponse) readFrom(r *bufio.Reader, size return } -type offsetCommitResponseV3Response struct { +type offsetCommitResponseV2Response struct { Topic string - PartitionResponses []offsetCommitResponseV3PartitionResponse + PartitionResponses []offsetCommitResponseV2PartitionResponse } -func (t offsetCommitResponseV3Response) size() int32 { +func (t offsetCommitResponseV2Response) size() int32 { return sizeofString(t.Topic) + sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) } -func (t offsetCommitResponseV3Response) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) }) +func (t offsetCommitResponseV2Response) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) }) } -func (t *offsetCommitResponseV3Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *offsetCommitResponseV2Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.Topic); err != nil { return } fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetCommitResponseV3PartitionResponse{} + item := offsetCommitResponseV2PartitionResponse{} if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { return } @@ -138,38 +138,28 @@ func (t *offsetCommitResponseV3Response) readFrom(r *bufio.Reader, size int) (re return } -type offsetCommitResponseV3 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - Responses []offsetCommitResponseV3Response +type offsetCommitResponseV2 struct { + Responses []offsetCommitResponseV2Response } -func (t offsetCommitResponseV3) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) +func (t offsetCommitResponseV2) size() int32 { + return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) } -func (t offsetCommitResponseV3) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) }) +func (t offsetCommitResponseV2) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) }) } -func (t *offsetCommitResponseV3) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - +func (t *offsetCommitResponseV2) readFrom(r *bufio.Reader, size int) (remain int, 
err error) { fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetCommitResponseV3Response{} + item := offsetCommitResponseV2Response{} if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { return } t.Responses = append(t.Responses, item) return } - if remain, err = readArrayWith(r, remain, fn); err != nil { + if remain, err = readArrayWith(r, size, fn); err != nil { return } diff --git a/vendor/github.com/segmentio/kafka-go/offsetfetch.go b/vendor/github.com/segmentio/kafka-go/offsetfetch.go index 7931490da..91b037e3a 100644 --- a/vendor/github.com/segmentio/kafka-go/offsetfetch.go +++ b/vendor/github.com/segmentio/kafka-go/offsetfetch.go @@ -4,7 +4,7 @@ import ( "bufio" ) -type offsetFetchRequestV3Topic struct { +type offsetFetchRequestV1Topic struct { // Topic name Topic string @@ -12,35 +12,35 @@ type offsetFetchRequestV3Topic struct { Partitions []int32 } -func (t offsetFetchRequestV3Topic) size() int32 { +func (t offsetFetchRequestV1Topic) size() int32 { return sizeofString(t.Topic) + sizeofInt32Array(t.Partitions) } -func (t offsetFetchRequestV3Topic) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeInt32Array(w, t.Partitions) +func (t offsetFetchRequestV1Topic) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeInt32Array(t.Partitions) } -type offsetFetchRequestV3 struct { +type offsetFetchRequestV1 struct { // GroupID holds the unique group identifier GroupID string // Topics to fetch offsets. - Topics []offsetFetchRequestV3Topic + Topics []offsetFetchRequestV1Topic } -func (t offsetFetchRequestV3) size() int32 { +func (t offsetFetchRequestV1) size() int32 { return sizeofString(t.GroupID) + sizeofArray(len(t.Topics), func(i int) int32 { return t.Topics[i].size() }) } -func (t offsetFetchRequestV3) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeArray(w, len(t.Topics), func(i int) { t.Topics[i].writeTo(w) }) +func (t offsetFetchRequestV1) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeArray(len(t.Topics), func(i int) { t.Topics[i].writeTo(wb) }) } -type offsetFetchResponseV3PartitionResponse struct { +type offsetFetchResponseV1PartitionResponse struct { // Partition ID Partition int32 @@ -54,21 +54,21 @@ type offsetFetchResponseV3PartitionResponse struct { ErrorCode int16 } -func (t offsetFetchResponseV3PartitionResponse) size() int32 { +func (t offsetFetchResponseV1PartitionResponse) size() int32 { return sizeofInt32(t.Partition) + sizeofInt64(t.Offset) + sizeofString(t.Metadata) + sizeofInt16(t.ErrorCode) } -func (t offsetFetchResponseV3PartitionResponse) writeTo(w *bufio.Writer) { - writeInt32(w, t.Partition) - writeInt64(w, t.Offset) - writeString(w, t.Metadata) - writeInt16(w, t.ErrorCode) +func (t offsetFetchResponseV1PartitionResponse) writeTo(wb *writeBuffer) { + wb.writeInt32(t.Partition) + wb.writeInt64(t.Offset) + wb.writeString(t.Metadata) + wb.writeInt16(t.ErrorCode) } -func (t *offsetFetchResponseV3PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *offsetFetchResponseV1PartitionResponse) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readInt32(r, size, &t.Partition); err != nil { return } @@ -84,31 +84,31 @@ func (t *offsetFetchResponseV3PartitionResponse) readFrom(r *bufio.Reader, size return } -type offsetFetchResponseV3Response struct { +type offsetFetchResponseV1Response struct { // Topic name Topic string // PartitionResponses holds offsets by partition - PartitionResponses 
[]offsetFetchResponseV3PartitionResponse + PartitionResponses []offsetFetchResponseV1PartitionResponse } -func (t offsetFetchResponseV3Response) size() int32 { +func (t offsetFetchResponseV1Response) size() int32 { return sizeofString(t.Topic) + sizeofArray(len(t.PartitionResponses), func(i int) int32 { return t.PartitionResponses[i].size() }) } -func (t offsetFetchResponseV3Response) writeTo(w *bufio.Writer) { - writeString(w, t.Topic) - writeArray(w, len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(w) }) +func (t offsetFetchResponseV1Response) writeTo(wb *writeBuffer) { + wb.writeString(t.Topic) + wb.writeArray(len(t.PartitionResponses), func(i int) { t.PartitionResponses[i].writeTo(wb) }) } -func (t *offsetFetchResponseV3Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { +func (t *offsetFetchResponseV1Response) readFrom(r *bufio.Reader, size int) (remain int, err error) { if remain, err = readString(r, size, &t.Topic); err != nil { return } fn := func(r *bufio.Reader, size int) (fnRemain int, fnErr error) { - item := offsetFetchResponseV3PartitionResponse{} + item := offsetFetchResponseV1PartitionResponse{} if fnRemain, fnErr = (&item).readFrom(r, size); err != nil { return } @@ -122,56 +122,36 @@ func (t *offsetFetchResponseV3Response) readFrom(r *bufio.Reader, size int) (rem return } -type offsetFetchResponseV3 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type offsetFetchResponseV1 struct { // Responses holds topic partition offsets - Responses []offsetFetchResponseV3Response - - // ErrorCode holds response error code - ErrorCode int16 + Responses []offsetFetchResponseV1Response } -func (t offsetFetchResponseV3) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) + - sizeofInt16(t.ErrorCode) +func (t offsetFetchResponseV1) size() int32 { + return sizeofArray(len(t.Responses), func(i int) int32 { return t.Responses[i].size() }) } -func (t offsetFetchResponseV3) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeArray(w, len(t.Responses), func(i int) { t.Responses[i].writeTo(w) }) - writeInt16(w, t.ErrorCode) +func (t offsetFetchResponseV1) writeTo(wb *writeBuffer) { + wb.writeArray(len(t.Responses), func(i int) { t.Responses[i].writeTo(wb) }) } -func (t *offsetFetchResponseV3) readFrom(r *bufio.Reader, size int) (remain int, err error) { - if remain, err = readInt32(r, size, &t.ThrottleTimeMS); err != nil { - return - } - +func (t *offsetFetchResponseV1) readFrom(r *bufio.Reader, size int) (remain int, err error) { fn := func(r *bufio.Reader, withSize int) (fnRemain int, fnErr error) { - item := offsetFetchResponseV3Response{} + item := offsetFetchResponseV1Response{} if fnRemain, fnErr = (&item).readFrom(r, withSize); fnErr != nil { return } t.Responses = append(t.Responses, item) return } - if remain, err = readArrayWith(r, remain, fn); err != nil { - return - } - - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { + if remain, err = readArrayWith(r, size, fn); err != nil { return } return } -func findOffset(topic string, partition int32, response offsetFetchResponseV3) (int64, bool) { +func findOffset(topic string, partition int32, response offsetFetchResponseV1) (int64, bool) { for _, r := range response.Responses { if r.Topic != topic { continue diff --git 
a/vendor/github.com/segmentio/kafka-go/produce.go b/vendor/github.com/segmentio/kafka-go/produce.go index 0eda7c811..449993e99 100644 --- a/vendor/github.com/segmentio/kafka-go/produce.go +++ b/vendor/github.com/segmentio/kafka-go/produce.go @@ -12,10 +12,10 @@ func (r produceRequestV2) size() int32 { return 2 + 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) } -func (r produceRequestV2) writeTo(w *bufio.Writer) { - writeInt16(w, r.RequiredAcks) - writeInt32(w, r.Timeout) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r produceRequestV2) writeTo(wb *writeBuffer) { + wb.writeInt16(r.RequiredAcks) + wb.writeInt32(r.Timeout) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } type produceRequestTopicV2 struct { @@ -28,9 +28,9 @@ func (t produceRequestTopicV2) size() int32 { sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t produceRequestTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t produceRequestTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } type produceRequestPartitionV2 struct { @@ -43,10 +43,10 @@ func (p produceRequestPartitionV2) size() int32 { return 4 + 4 + p.MessageSet.size() } -func (p produceRequestPartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt32(w, p.MessageSetSize) - p.MessageSet.writeTo(w) +func (p produceRequestPartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt32(p.MessageSetSize) + p.MessageSet.writeTo(wb) } type produceResponseV2 struct { @@ -58,9 +58,9 @@ func (r produceResponseV2) size() int32 { return 4 + sizeofArray(len(r.Topics), func(i int) int32 { return r.Topics[i].size() }) } -func (r produceResponseV2) writeTo(w *bufio.Writer) { - writeInt32(w, r.ThrottleTime) - writeArray(w, len(r.Topics), func(i int) { r.Topics[i].writeTo(w) }) +func (r produceResponseV2) writeTo(wb *writeBuffer) { + wb.writeInt32(r.ThrottleTime) + wb.writeArray(len(r.Topics), func(i int) { r.Topics[i].writeTo(wb) }) } type produceResponseTopicV2 struct { @@ -73,9 +73,9 @@ func (t produceResponseTopicV2) size() int32 { sizeofArray(len(t.Partitions), func(i int) int32 { return t.Partitions[i].size() }) } -func (t produceResponseTopicV2) writeTo(w *bufio.Writer) { - writeString(w, t.TopicName) - writeArray(w, len(t.Partitions), func(i int) { t.Partitions[i].writeTo(w) }) +func (t produceResponseTopicV2) writeTo(wb *writeBuffer) { + wb.writeString(t.TopicName) + wb.writeArray(len(t.Partitions), func(i int) { t.Partitions[i].writeTo(wb) }) } type produceResponsePartitionV2 struct { @@ -89,11 +89,11 @@ func (p produceResponsePartitionV2) size() int32 { return 4 + 2 + 8 + 8 } -func (p produceResponsePartitionV2) writeTo(w *bufio.Writer) { - writeInt32(w, p.Partition) - writeInt16(w, p.ErrorCode) - writeInt64(w, p.Offset) - writeInt64(w, p.Timestamp) +func (p produceResponsePartitionV2) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) } func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain int, err error) { @@ -111,3 +111,42 @@ func (p *produceResponsePartitionV2) readFrom(r *bufio.Reader, sz int) (remain i } return } + +type produceResponsePartitionV7 struct { + Partition int32 + ErrorCode 
int16 + Offset int64 + Timestamp int64 + StartOffset int64 +} + +func (p produceResponsePartitionV7) size() int32 { + return 4 + 2 + 8 + 8 + 8 +} + +func (p produceResponsePartitionV7) writeTo(wb *writeBuffer) { + wb.writeInt32(p.Partition) + wb.writeInt16(p.ErrorCode) + wb.writeInt64(p.Offset) + wb.writeInt64(p.Timestamp) + wb.writeInt64(p.StartOffset) +} + +func (p *produceResponsePartitionV7) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt32(r, sz, &p.Partition); err != nil { + return + } + if remain, err = readInt16(r, remain, &p.ErrorCode); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Offset); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.Timestamp); err != nil { + return + } + if remain, err = readInt64(r, remain, &p.StartOffset); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/protocol.go b/vendor/github.com/segmentio/kafka-go/protocol.go index 30d987aff..449ad6e40 100644 --- a/vendor/github.com/segmentio/kafka-go/protocol.go +++ b/vendor/github.com/segmentio/kafka-go/protocol.go @@ -1,7 +1,6 @@ package kafka import ( - "bufio" "encoding/binary" "fmt" ) @@ -22,17 +21,23 @@ const ( syncGroupRequest apiKey = 14 describeGroupsRequest apiKey = 15 listGroupsRequest apiKey = 16 + saslHandshakeRequest apiKey = 17 + apiVersionsRequest apiKey = 18 createTopicsRequest apiKey = 19 deleteTopicsRequest apiKey = 20 + saslAuthenticateRequest apiKey = 36 ) type apiVersion int16 const ( - v0 apiVersion = 0 - v1 apiVersion = 1 - v2 apiVersion = 2 - v3 apiVersion = 3 + v0 apiVersion = 0 + v1 apiVersion = 1 + v2 apiVersion = 2 + v3 apiVersion = 3 + v5 apiVersion = 5 + v7 apiVersion = 7 + v10 apiVersion = 10 ) type requestHeader struct { @@ -47,17 +52,17 @@ func (h requestHeader) size() int32 { return 4 + 2 + 2 + 4 + sizeofString(h.ClientID) } -func (h requestHeader) writeTo(w *bufio.Writer) { - writeInt32(w, h.Size) - writeInt16(w, h.ApiKey) - writeInt16(w, h.ApiVersion) - writeInt32(w, h.CorrelationID) - writeString(w, h.ClientID) +func (h requestHeader) writeTo(wb *writeBuffer) { + wb.writeInt32(h.Size) + wb.writeInt16(h.ApiKey) + wb.writeInt16(h.ApiVersion) + wb.writeInt32(h.CorrelationID) + wb.writeString(h.ClientID) } type request interface { size() int32 - writeTo(*bufio.Writer) + writable } func makeInt8(b []byte) int8 { diff --git a/vendor/github.com/segmentio/kafka-go/read.go b/vendor/github.com/segmentio/kafka-go/read.go index d0b323819..d2dcb299a 100644 --- a/vendor/github.com/segmentio/kafka-go/read.go +++ b/vendor/github.com/segmentio/kafka-go/read.go @@ -42,6 +42,59 @@ func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) { return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) }) } +func readVarInt(r *bufio.Reader, sz int, v *int64) (remain int, err error) { + // Optimistically assume that most of the time, there will be data buffered + // in the reader. If this is not the case, the buffer will be refilled after + // consuming zero bytes from the input. + input, _ := r.Peek(r.Buffered()) + x := uint64(0) + s := uint(0) + + for { + if len(input) > sz { + input = input[:sz] + } + + for i, b := range input { + if b < 0x80 { + x |= uint64(b) << s + *v = int64(x>>1) ^ -(int64(x) & 1) + n, err := r.Discard(i + 1) + return sz - n, err + } + + x |= uint64(b&0x7f) << s + s += 7 + } + + // Make room in the input buffer to load more data from the underlying + // stream. 
The x and s variables are left untouched, ensuring that the + // varint decoding can continue on the next loop iteration. + n, _ := r.Discard(len(input)) + sz -= n + if sz == 0 { + return 0, errShortRead + } + + // Fill the buffer: ask for one more byte, but in practice the reader + // will load way more from the underlying stream. + if _, err := r.Peek(1); err != nil { + if err == io.EOF { + err = errShortRead + } + return sz, err + } + + // Grab as many bytes as possible from the buffer, then go on to the + // next loop iteration which is going to consume it. + input, _ = r.Peek(r.Buffered()) + } +} + +func readBool(r *bufio.Reader, sz int, v *bool) (int, error) { + return peekRead(r, sz, 1, func(b []byte) { *v = b[0] != 0 }) +} + func readString(r *bufio.Reader, sz int, v *string) (int, error) { return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) { *v, remain, err = readNewString(r, sz, n) @@ -79,13 +132,12 @@ func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) { func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) { var err error - var len int32 + var n int - if sz, err = readInt32(r, sz, &len); err != nil { + if sz, err = readArrayLen(r, sz, &n); err != nil { return sz, err } - n := int(len) if n > sz { return sz, errShortRead } @@ -96,17 +148,37 @@ func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (in func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) { var err error var b []byte + var shortRead bool if n > 0 { + if sz < n { + n = sz + shortRead = true + } + b = make([]byte, n) n, err = io.ReadFull(r, b) b = b[:n] sz -= n + + if err == nil && shortRead { + err = errShortRead + } } return b, sz, err } +func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) { + var err error + var len int32 + if sz, err = readInt32(r, sz, &len); err != nil { + return sz, err + } + *n = int(len) + return sz, nil +} + func readArrayWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int) (int, error)) (int, error) { var err error var len int32 @@ -186,6 +258,8 @@ func read(r *bufio.Reader, sz int, a interface{}) (int, error) { return readInt32(r, sz, v) case *int64: return readInt64(r, sz, v) + case *bool: + return readBool(r, sz, v) case *string: return readString(r, sz, v) case *[]byte: @@ -267,7 +341,7 @@ func readSlice(r *bufio.Reader, sz int, v reflect.Value) (int, error) { return sz, nil } -func readFetchResponseHeader(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { +func readFetchResponseHeaderV2(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { var n int32 var p struct { Partition int32 @@ -329,6 +403,201 @@ func readFetchResponseHeader(r *bufio.Reader, size int) (throttle int32, waterma return } +func readFetchResponseHeaderV5(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. 
+ if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requests messages for a single + // topic, unless there's a bug in the kafka server we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} + +func readFetchResponseHeaderV10(r *bufio.Reader, size int) (throttle int32, watermark int64, remain int, err error) { + var n int32 + var errorCode int16 + type AbortedTransaction struct { + ProducerId int64 + FirstOffset int64 + } + var p struct { + Partition int32 + ErrorCode int16 + HighwaterMarkOffset int64 + LastStableOffset int64 + LogStartOffset int64 + } + var messageSetSize int32 + var abortedTransactions []AbortedTransaction + + if remain, err = readInt32(r, size, &throttle); err != nil { + return + } + + if remain, err = readInt16(r, remain, &errorCode); err != nil { + return + } + if errorCode != 0 { + err = Error(errorCode) + return + } + + if remain, err = discardInt32(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if n != 1 { + err = fmt.Errorf("1 kafka topic was expected in the fetch response but the client received %d", n) + return + } + + // We ignore the topic name because we've requests messages for a single + // topic, unless there's a bug in the kafka server we will have received + // the name of the topic that we requested. + if remain, err = discardString(r, remain); err != nil { + return + } + + if remain, err = readInt32(r, remain, &n); err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. 
+ if n != 1 { + err = fmt.Errorf("1 kafka partition was expected in the fetch response but the client received %d", n) + return + } + + if remain, err = read(r, remain, &p); err != nil { + return + } + + var abortedTransactionLen int + if remain, err = readArrayLen(r, remain, &abortedTransactionLen); err != nil { + return + } + + if abortedTransactionLen == -1 { + abortedTransactions = nil + } else { + abortedTransactions = make([]AbortedTransaction, abortedTransactionLen) + for i := 0; i < abortedTransactionLen; i++ { + if remain, err = read(r, remain, &abortedTransactions[i]); err != nil { + return + } + } + } + + if p.ErrorCode != 0 { + err = Error(p.ErrorCode) + return + } + + remain, err = readInt32(r, remain, &messageSetSize) + if err != nil { + return + } + + // This error should never trigger, unless there's a bug in the kafka client + // or server. + if remain != int(messageSetSize) { + err = fmt.Errorf("the size of the message set in a fetch response doesn't match the number of remaining bytes (message set size = %d, remaining bytes = %d)", messageSetSize, remain) + return + } + + watermark = p.HighwaterMarkOffset + return + +} + func readMessageHeader(r *bufio.Reader, sz int) (offset int64, attributes int8, timestamp int64, remain int, err error) { var version int8 @@ -368,33 +637,3 @@ func readMessageHeader(r *bufio.Reader, sz int) (offset int64, attributes int8, return } - -func readMessage(r *bufio.Reader, sz int, min int64, - key func(*bufio.Reader, int, int) (int, error), - val func(*bufio.Reader, int, int) (int, error), -) (offset int64, timestamp int64, remain int, err error) { - for { - // TODO: read attributes and decompress the message - if offset, _, timestamp, remain, err = readMessageHeader(r, sz); err != nil { - return - } - - // When the messages are compressed kafka may return messages at an - // earlier offset than the one that was requested, apparently it's the - // client's responsibility to ignore those. - if offset >= min { - if remain, err = readBytesWith(r, remain, key); err != nil { - return - } - remain, err = readBytesWith(r, remain, val) - return - } - - if remain, err = discardBytes(r, remain); err != nil { - return - } - if remain, err = discardBytes(r, remain); err != nil { - return - } - } -} diff --git a/vendor/github.com/segmentio/kafka-go/reader.go b/vendor/github.com/segmentio/kafka-go/reader.go index 20d79b344..825f6922e 100644 --- a/vendor/github.com/segmentio/kafka-go/reader.go +++ b/vendor/github.com/segmentio/kafka-go/reader.go @@ -1,8 +1,6 @@ package kafka import ( - "bufio" - "bytes" "context" "errors" "fmt" @@ -17,8 +15,8 @@ import ( ) const ( - firstOffset = -1 - lastOffset = -2 + LastOffset int64 = -1 // The most recent offset available for a partition. + FirstOffset = -2 // The least recent offset available for a partition. ) const ( @@ -33,31 +31,10 @@ var ( ) const ( - // defaultProtocolType holds the default protocol type documented in the - // kafka protocol - // - // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI - defaultProtocolType = "consumer" - - // defaultHeartbeatInterval contains the default time between heartbeats. If - // the coordinator does not receive a heartbeat within the session timeout interval, - // the consumer will be considered dead and the coordinator will rebalance the - // group. 
- // - // As a rule, the heartbeat interval should be no greater than 1/3 the session timeout - defaultHeartbeatInterval = 3 * time.Second - - // defaultSessionTimeout contains the default interval the coordinator will wait - // for a heartbeat before marking a consumer as dead - defaultSessionTimeout = 30 * time.Second - - // defaultRebalanceTimeout contains the amount of time the coordinator will wait - // for consumers to issue a join group once a rebalance has been requested - defaultRebalanceTimeout = 30 * time.Second - - // defaultRetentionTime holds the length of time a the consumer group will be - // saved by kafka - defaultRetentionTime = time.Hour * 24 + // defaultReadBackoffMax/Min sets the boundaries for how long the reader wait before + // polling for new messages + defaultReadBackoffMin = 100 * time.Millisecond + defaultReadBackoffMax = 1 * time.Second ) // Reader provides a high-level API for consuming messages from kafka. @@ -72,23 +49,16 @@ type Reader struct { msgs chan readerMessage // mutable fields of the reader (synchronized on the mutex) - mutex sync.Mutex - join sync.WaitGroup - cancel context.CancelFunc - stop context.CancelFunc - done chan struct{} - commits chan commitRequest - version int64 // version holds the generation of the spawned readers - offset int64 - lag int64 - closed bool - address string // address of group coordinator - generationID int32 // generationID of group - memberID string // memberID of group - - // offsetStash should only be managed by the commitLoopInterval. We store - // it here so that it survives rebalances - offsetStash offsetStash + mutex sync.Mutex + join sync.WaitGroup + cancel context.CancelFunc + stop context.CancelFunc + done chan struct{} + commits chan commitRequest + version int64 // version holds the generation of the spawned readers + offset int64 + lag int64 + closed bool // reader stats are all made of atomic values, no need for synchronization. once uint32 @@ -105,408 +75,23 @@ func (r *Reader) useConsumerGroup() bool { return r.config.GroupID != "" } // async commits. func (r *Reader) useSyncCommits() bool { return r.config.CommitInterval == 0 } -// membership returns the group generationID and memberID of the reader. -// -// Only used when config.GroupID != "" -func (r *Reader) membership() (generationID int32, memberID string) { - r.mutex.Lock() - generationID = r.generationID - memberID = r.memberID - r.mutex.Unlock() - return -} - -// lookupCoordinator scans the brokers and looks up the address of the -// coordinator for the group. 
-// -// Only used when config.GroupID != "" -func (r *Reader) lookupCoordinator() (string, error) { - conn, err := r.connect() - if err != nil { - return "", fmt.Errorf("unable to coordinator to any connect for group, %v: %v\n", r.config.GroupID, err) - } - defer conn.Close() - - out, err := conn.findCoordinator(findCoordinatorRequestV1{ - CoordinatorKey: r.config.GroupID, - }) - if err != nil { - return "", fmt.Errorf("unable to find coordinator for group, %v: %v", r.config.GroupID, err) - } - - address := fmt.Sprintf("%v:%v", out.Coordinator.Host, out.Coordinator.Port) - return address, nil -} - -// refreshCoordinator updates the value of r.address -func (r *Reader) refreshCoordinator() (err error) { - const ( - backoffDelayMin = 100 * time.Millisecond - backoffDelayMax = 1 * time.Second - ) - - for attempt := 0; true; attempt++ { - if attempt != 0 { - if !sleep(r.stctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { - return r.stctx.Err() - } - } - - address, err := r.lookupCoordinator() - if err != nil { - continue - } - - r.mutex.Lock() - oldAddress := r.address - r.address = address - r.mutex.Unlock() - - if address != oldAddress { - r.withLogger(func(l *log.Logger) { - l.Printf("coordinator for group, %v, set to %v\n", r.config.GroupID, address) - }) - } - - break - } - - return nil -} - -// makeJoinGroupRequestV2 handles the logic of constructing a joinGroup -// request -func (r *Reader) makeJoinGroupRequestV2() (joinGroupRequestV2, error) { - _, memberID := r.membership() - - request := joinGroupRequestV2{ - GroupID: r.config.GroupID, - MemberID: memberID, - SessionTimeout: int32(r.config.SessionTimeout / time.Millisecond), - RebalanceTimeout: int32(r.config.RebalanceTimeout / time.Millisecond), - ProtocolType: defaultProtocolType, - } - - for _, strategy := range allStrategies { - meta, err := strategy.GroupMetadata([]string{r.config.Topic}) - if err != nil { - return joinGroupRequestV2{}, fmt.Errorf("unable to construct protocol metadata for member, %v: %v\n", strategy.ProtocolName(), err) - } - - request.GroupProtocols = append(request.GroupProtocols, joinGroupRequestGroupProtocolV2{ - ProtocolName: strategy.ProtocolName(), - ProtocolMetadata: meta.bytes(), - }) - } - - return request, nil -} - -// makeMemberProtocolMetadata maps encoded member metadata ([]byte) into memberGroupMetadata -func (r *Reader) makeMemberProtocolMetadata(in []joinGroupResponseMemberV2) ([]memberGroupMetadata, error) { - members := make([]memberGroupMetadata, 0, len(in)) - for _, item := range in { - metadata := groupMetadata{} - reader := bufio.NewReader(bytes.NewReader(item.MemberMetadata)) - if remain, err := (&metadata).readFrom(reader, len(item.MemberMetadata)); err != nil || remain != 0 { - return nil, fmt.Errorf("unable to read metadata for member, %v: %v\n", item.MemberID, err) - } - - member := memberGroupMetadata{ - MemberID: item.MemberID, - Metadata: metadata, - } - members = append(members, member) - } - return members, nil -} - -// partitionReader is an internal interface used to simplify unit testing -type partitionReader interface { - // ReadPartitions mirrors Conn.ReadPartitions - ReadPartitions(topics ...string) (partitions []Partition, err error) -} - -// assignTopicPartitions uses the selected strategy to assign members to their -// various partitions -func (r *Reader) assignTopicPartitions(conn partitionReader, group joinGroupResponseV2) (memberGroupAssignments, error) { - r.withLogger(func(l *log.Logger) { - l.Println("selected as leader for group,", r.config.GroupID) - }) 
- - strategy, ok := findStrategy(group.GroupProtocol, allStrategies) - if !ok { - return nil, fmt.Errorf("unable to find selected strategy, %v, for group, %v", group.GroupProtocol, r.config.GroupID) - } - - members, err := r.makeMemberProtocolMetadata(group.Members) - if err != nil { - return nil, fmt.Errorf("unable to construct MemberProtocolMetadata: %v", err) - } - - topics := extractTopics(members) - partitions, err := conn.ReadPartitions(topics...) - if err != nil { - return nil, fmt.Errorf("unable to read partitions: %v", err) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("using '%v' strategy to assign group, %v\n", group.GroupProtocol, r.config.GroupID) - for _, partition := range partitions { - l.Printf("found topic/partition: %v/%v", partition.Topic, partition.ID) - } - }) - - return strategy.AssignGroups(members, partitions), nil -} - -func (r *Reader) leaveGroup(conn *Conn) error { - _, memberID := r.membership() - _, err := conn.leaveGroup(leaveGroupRequestV1{ - GroupID: r.config.GroupID, - MemberID: memberID, - }) - if err != nil { - return fmt.Errorf("leave group failed for group, %v, and member, %v: %v", r.config.GroupID, memberID, err) - } - - return nil -} - -// joinGroup attempts to join the reader to the consumer group. -// Returns memberGroupAssignments is this Reader was selected as -// the leader. Otherwise, memberGroupAssignments will be nil. -// -// Possible kafka error codes returned: -// * GroupLoadInProgress: -// * GroupCoordinatorNotAvailable: -// * NotCoordinatorForGroup: -// * InconsistentGroupProtocol: -// * InvalidSessionTimeout: -// * GroupAuthorizationFailed: -func (r *Reader) joinGroup() (memberGroupAssignments, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - request, err := r.makeJoinGroupRequestV2() - if err != nil { - return nil, err - } - - response, err := conn.joinGroup(request) - if err != nil { - switch err { - case UnknownMemberId: - r.mutex.Lock() - r.memberID = "" - r.mutex.Unlock() - return nil, fmt.Errorf("joinGroup failed: %v", err) - - default: - return nil, fmt.Errorf("joinGroup failed: %v", err) - } - } - - // Extract our membership and generationID from the response - r.mutex.Lock() - oldGenerationID := r.generationID - oldMemberID := r.memberID - r.generationID = response.GenerationID - r.memberID = response.MemberID - r.mutex.Unlock() - - if oldGenerationID != response.GenerationID || oldMemberID != response.MemberID { - r.withLogger(func(l *log.Logger) { - l.Printf("response membership changed. generationID: %v => %v, memberID: '%v' => '%v'\n", - oldGenerationID, - response.GenerationID, - oldMemberID, - response.MemberID, - ) - }) - } - - var assignments memberGroupAssignments - if iAmLeader := response.MemberID == response.LeaderID; iAmLeader { - v, err := r.assignTopicPartitions(conn, response) - if err != nil { - _ = r.leaveGroup(conn) - return nil, err - } - assignments = v - - r.withLogger(func(l *log.Logger) { - for memberID, assignment := range assignments { - for topic, partitions := range assignment { - l.Printf("assigned member/topic/partitions %v/%v/%v\n", memberID, topic, partitions) - } - } - }) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("joinGroup succeeded for response, %v. 
generationID=%v, memberID=%v\n", r.config.GroupID, response.GenerationID, response.MemberID) - }) - - return assignments, nil -} - -func (r *Reader) makeSyncGroupRequestV1(memberAssignments memberGroupAssignments) syncGroupRequestV1 { - generationID, memberID := r.membership() - request := syncGroupRequestV1{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - } - - if memberAssignments != nil { - request.GroupAssignments = make([]syncGroupRequestGroupAssignmentV1, 0, 1) - - for memberID, topics := range memberAssignments { - request.GroupAssignments = append(request.GroupAssignments, syncGroupRequestGroupAssignmentV1{ - MemberID: memberID, - MemberAssignments: groupAssignment{ - Version: 1, - Topics: topics, - }.bytes(), - }) - } - } - - return request -} - -// syncGroup completes the consumer group handshake by accepting the -// memberAssignments (if this Reader is the leader) and returning this -// Readers subscriptions topic => partitions -// -// Possible kafka error codes returned: -// * GroupCoordinatorNotAvailable: -// * NotCoordinatorForGroup: -// * IllegalGeneration: -// * RebalanceInProgress: -// * GroupAuthorizationFailed: -func (r *Reader) syncGroup(memberAssignments memberGroupAssignments) (map[string][]int32, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - request := r.makeSyncGroupRequestV1(memberAssignments) - response, err := conn.syncGroups(request) - if err != nil { - switch err { - case RebalanceInProgress: - // don't leave the group - return nil, fmt.Errorf("syncGroup failed: %v", err) - - case UnknownMemberId: - r.mutex.Lock() - r.memberID = "" - r.mutex.Unlock() - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("syncGroup failed: %v", err) - - default: - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("syncGroup failed: %v", err) - } - } - - assignments := groupAssignment{} - reader := bufio.NewReader(bytes.NewReader(response.MemberAssignments)) - if _, err := (&assignments).readFrom(reader, len(response.MemberAssignments)); err != nil { - _ = r.leaveGroup(conn) - return nil, fmt.Errorf("unable to read SyncGroup response for group, %v: %v\n", r.config.GroupID, err) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("sync group finished for group, %v\n", r.config.GroupID) - }) - - return assignments.Topics, nil -} - -func (r *Reader) rebalance() (map[string][]int32, error) { - r.withLogger(func(l *log.Logger) { - l.Printf("rebalancing consumer group, %v", r.config.GroupID) - }) - - if err := r.refreshCoordinator(); err != nil { - return nil, err - } - - members, err := r.joinGroup() - if err != nil { - return nil, err - } - - assignments, err := r.syncGroup(members) - if err != nil { - return nil, err - } - - return assignments, nil -} - -func (r *Reader) unsubscribe() error { +func (r *Reader) unsubscribe() { r.cancel() r.join.Wait() - return nil -} - -func (r *Reader) fetchOffsets(subs map[string][]int32) (map[int]int64, error) { - conn, err := r.coordinator() - if err != nil { - return nil, err - } - defer conn.Close() - - partitions := subs[r.config.Topic] - offsets, err := conn.offsetFetch(offsetFetchRequestV3{ - GroupID: r.config.GroupID, - Topics: []offsetFetchRequestV3Topic{ - { - Topic: r.config.Topic, - Partitions: partitions, - }, - }, - }) - if err != nil { - return nil, err - } - - offsetsByPartition := map[int]int64{} - for _, pr := range offsets.Responses[0].PartitionResponses { - for _, partition := range partitions { - if partition == pr.Partition { - offset := 
pr.Offset - offsetsByPartition[int(partition)] = offset - } - } - } - - return offsetsByPartition, nil + // it would be interesting to drain the r.msgs channel at this point since + // it will contain buffered messages for partitions that may not be + // re-assigned to this reader in the next consumer group generation. + // however, draining the channel could race with the client calling + // ReadMessage, which could result in messages delivered and/or committed + // with gaps in the offset. for now, we will err on the side of caution and + // potentially have those messages be reprocessed in the next generation by + // another consumer to avoid such a race. } -func (r *Reader) subscribe(subs map[string][]int32) error { - if len(subs[r.config.Topic]) == 0 { - return nil - } - - offsetsByPartition, err := r.fetchOffsets(subs) - if err != nil { - if conn, err := r.coordinator(); err == nil { - // make an attempt at leaving the group - _ = r.leaveGroup(conn) - conn.Close() - } - - return err +func (r *Reader) subscribe(assignments []PartitionAssignment) { + offsetsByPartition := make(map[int]int64) + for _, assignment := range assignments { + offsetsByPartition[assignment.ID] = assignment.Offset } r.mutex.Lock() @@ -516,32 +101,6 @@ func (r *Reader) subscribe(subs map[string][]int32) error { r.withLogger(func(l *log.Logger) { l.Printf("subscribed to partitions: %+v", offsetsByPartition) }) - - return nil -} - -// connect returns a connection to ANY broker -func (r *Reader) connect() (conn *Conn, err error) { - for _, broker := range r.config.Brokers { - if conn, err = r.config.Dialer.Dial("tcp", broker); err == nil { - return - } - } - return // err will be non-nil -} - -// coordinator returns a connection to the coordinator for this group -func (r *Reader) coordinator() (*Conn, error) { - r.mutex.Lock() - address := r.address - r.mutex.Unlock() - - conn, err := r.config.Dialer.DialContext(r.stctx, "tcp", address) - if err != nil { - return nil, fmt.Errorf("unable to connect to coordinator, %v", address) - } - - return conn, nil } func (r *Reader) waitThrottleTime(throttleTimeMS int32) { @@ -559,96 +118,9 @@ func (r *Reader) waitThrottleTime(throttleTimeMS int32) { } } -// heartbeat sends heartbeat to coordinator at the interval defined by -// ReaderConfig.HeartbeatInterval -func (r *Reader) heartbeat(conn *Conn) error { - generationID, memberID := r.membership() - if generationID == 0 && memberID == "" { - return nil - } - - resp, err := conn.heartbeat(heartbeatRequestV1{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - }) - if err != nil { - return fmt.Errorf("heartbeat failed: %v", err) - } - - r.waitThrottleTime(resp.ThrottleTimeMS) - - return nil -} - -func (r *Reader) heartbeatLoop(conn *Conn) func(stop <-chan struct{}) { - return func(stop <-chan struct{}) { - r.withLogger(func(l *log.Logger) { - l.Printf("started heartbeat for group, %v [%v]", r.config.GroupID, r.config.HeartbeatInterval) - }) - defer r.withLogger(func(l *log.Logger) { - l.Println("stopped heartbeat for group,", r.config.GroupID) - }) - - ticker := time.NewTicker(r.config.HeartbeatInterval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - if err := r.heartbeat(conn); err != nil { - return - } - - case <-stop: - return - } - } - } -} - -type offsetCommitter interface { - offsetCommit(request offsetCommitRequestV3) (offsetCommitResponseV3, error) -} - -func (r *Reader) commitOffsets(conn offsetCommitter, offsetStash offsetStash) error { - if len(offsetStash) == 0 { - 
return nil - } - - generationID, memberID := r.membership() - request := offsetCommitRequestV3{ - GroupID: r.config.GroupID, - GenerationID: generationID, - MemberID: memberID, - RetentionTime: int64(r.config.RetentionTime / time.Millisecond), - } - - for topic, partitions := range offsetStash { - t := offsetCommitRequestV3Topic{Topic: topic} - for partition, offset := range partitions { - t.Partitions = append(t.Partitions, offsetCommitRequestV3Partition{ - Partition: int32(partition), - Offset: offset, - }) - } - request.Topics = append(request.Topics, t) - } - - if _, err := conn.offsetCommit(request); err != nil { - return fmt.Errorf("unable to commit offsets for group, %v: %v", r.config.GroupID, err) - } - - r.withLogger(func(l *log.Logger) { - l.Printf("committed offsets: %v", offsetStash) - }) - - return nil -} - // commitOffsetsWithRetry attempts to commit the specified offsets and retries // up to the specified number of times -func (r *Reader) commitOffsetsWithRetry(conn offsetCommitter, offsetStash offsetStash, retries int) (err error) { +func (r *Reader) commitOffsetsWithRetry(gen *Generation, offsetStash offsetStash, retries int) (err error) { const ( backoffDelayMin = 100 * time.Millisecond backoffDelayMax = 5 * time.Second @@ -661,7 +133,7 @@ func (r *Reader) commitOffsetsWithRetry(conn offsetCommitter, offsetStash offset } } - if err = r.commitOffsets(conn, offsetStash); err == nil { + if err = gen.CommitOffsets(offsetStash); err == nil { return } } @@ -695,39 +167,52 @@ func (o offsetStash) reset() { } // commitLoopImmediate handles each commit synchronously -func (r *Reader) commitLoopImmediate(conn offsetCommitter, stop <-chan struct{}) { - offsetsByTopicAndPartition := offsetStash{} +func (r *Reader) commitLoopImmediate(ctx context.Context, gen *Generation) { + offsets := offsetStash{} for { select { - case <-stop: + case <-ctx.Done(): return case req := <-r.commits: - offsetsByTopicAndPartition.merge(req.commits) - req.errch <- r.commitOffsetsWithRetry(conn, offsetsByTopicAndPartition, defaultCommitRetries) - offsetsByTopicAndPartition.reset() + offsets.merge(req.commits) + req.errch <- r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries) + offsets.reset() } } } // commitLoopInterval handles each commit asynchronously with a period defined // by ReaderConfig.CommitInterval -func (r *Reader) commitLoopInterval(conn offsetCommitter, stop <-chan struct{}) { - ticker := time.NewTicker(r.config.HeartbeatInterval) +func (r *Reader) commitLoopInterval(ctx context.Context, gen *Generation) { + ticker := time.NewTicker(r.config.CommitInterval) defer ticker.Stop() + // the offset stash should not survive rebalances b/c the consumer may + // receive new assignments. + offsets := offsetStash{} + commit := func() { - if err := r.commitOffsetsWithRetry(conn, r.offsetStash, defaultCommitRetries); err != nil { + if err := r.commitOffsetsWithRetry(gen, offsets, defaultCommitRetries); err != nil { r.withErrorLogger(func(l *log.Logger) { l.Print(err) }) } else { - r.offsetStash.reset() + offsets.reset() } } for { select { - case <-stop: + case <-ctx.Done(): + // drain the commit channel in order to prepare the final commit. 
+ for hasCommits := true; hasCommits; { + select { + case req := <-r.commits: + offsets.merge(req.commits) + default: + hasCommits = false + } + } commit() return @@ -735,89 +220,69 @@ func (r *Reader) commitLoopInterval(conn offsetCommitter, stop <-chan struct{}) commit() case req := <-r.commits: - r.offsetStash.merge(req.commits) + offsets.merge(req.commits) } } } // commitLoop processes commits off the commit chan -func (r *Reader) commitLoop(conn *Conn) func(stop <-chan struct{}) { - return func(stop <-chan struct{}) { - r.withLogger(func(l *log.Logger) { - l.Println("started commit for group,", r.config.GroupID) - }) - defer r.withLogger(func(l *log.Logger) { - l.Println("stopped commit for group,", r.config.GroupID) - }) - - if r.config.CommitInterval == 0 { - r.commitLoopImmediate(conn, stop) - } else { - r.commitLoopInterval(conn, stop) - } - } -} - -// handshake performs the necessary incantations to join this Reader to the desired -// consumer group. handshake will be called whenever the group is disrupted -// (member join, member leave, coordinator changed, etc) -func (r *Reader) handshake() error { - // always clear prior to subscribe - r.unsubscribe() - - // rebalance and fetch assignments - assignments, err := r.rebalance() - if err != nil { - return fmt.Errorf("rebalance failed for consumer group, %v: %v", r.config.GroupID, err) - } - - conn, err := r.coordinator() - if err != nil { - return fmt.Errorf("heartbeat: unable to connect to coordinator: %v", err) - } - defer conn.Close() - - rg := &runGroup{} - rg = rg.WithContext(r.stctx) - rg.Go(r.heartbeatLoop(conn)) - rg.Go(r.commitLoop(conn)) +func (r *Reader) commitLoop(ctx context.Context, gen *Generation) { + r.withLogger(func(l *log.Logger) { + l.Println("started commit for group,", r.config.GroupID) + }) + defer r.withLogger(func(l *log.Logger) { + l.Println("stopped commit for group,", r.config.GroupID) + }) - // subscribe to assignments - if err := r.subscribe(assignments); err != nil { - rg.Stop() - return fmt.Errorf("subscribe failed for consumer group, %v: %v\n", r.config.GroupID, err) + if r.config.CommitInterval == 0 { + r.commitLoopImmediate(ctx, gen) + } else { + r.commitLoopInterval(ctx, gen) } - - rg.Wait() - - return nil } // run provides the main consumer group management loop. Each iteration performs the // handshake to join the Reader to the consumer group. -func (r *Reader) run() { +// +// This function is responsible for closing the consumer group upon exit. +func (r *Reader) run(cg *ConsumerGroup) { defer close(r.done) - - if !r.useConsumerGroup() { - return - } + defer cg.Close() r.withLogger(func(l *log.Logger) { l.Printf("entering loop for consumer group, %v\n", r.config.GroupID) }) for { - if err := r.handshake(); err != nil { + gen, err := cg.Next(r.stctx) + if err != nil { + if err == r.stctx.Err() { + return + } + r.stats.errors.observe(1) r.withErrorLogger(func(l *log.Logger) { l.Println(err) }) + continue } - select { - case <-r.stctx.Done(): - return - default: - } + r.stats.rebalances.observe(1) + + r.subscribe(gen.Assignments[r.config.Topic]) + + gen.Start(func(ctx context.Context) { + r.commitLoop(ctx, gen) + }) + gen.Start(func(ctx context.Context) { + // wait for the generation to end and then unsubscribe. + select { + case <-ctx.Done(): + // continue to next generation + case <-r.stctx.Done(): + // this will be the last loop because the reader is closed. 
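The shutdown path of commitLoopInterval above drains any queued commit requests before issuing one last commit, so offsets handed to the reader just before cancellation are not dropped. The drain is the standard non-blocking "select with default" pattern; a small standalone sketch under stand-in types (the channel and the merge step here are placeholders, not the reader's actual fields):

package main

import "fmt"

func main() {
	pending := make(chan int, 8)
	pending <- 3
	pending <- 5

	total := 0
	// Keep receiving until nothing is buffered; the default case makes each
	// receive non-blocking so the loop cannot hang once the channel is empty.
	for hasMore := true; hasMore; {
		select {
		case v := <-pending:
			total += v // stand-in for offsets.merge(req.commits)
		default:
			hasMore = false
		}
	}
	fmt.Println("final commit with", total) // final commit with 8
}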
+ } + r.unsubscribe() + }) } } @@ -858,6 +323,15 @@ type ReaderConfig struct { // Setting this field to a negative value disables lag reporting. ReadLagInterval time.Duration + // GroupBalancers is the priority-ordered list of client-side consumer group + // balancing strategies that will be offered to the coordinator. The first + // strategy that all group members support will be chosen by the leader. + // + // Default: [Range, RoundRobin] + // + // Only used when GroupID is set + GroupBalancers []GroupBalancer + // HeartbeatInterval sets the optional frequency at which the reader sends the consumer // group heartbeat update. // @@ -869,11 +343,24 @@ type ReaderConfig struct { // CommitInterval indicates the interval at which offsets are committed to // the broker. If 0, commits will be handled synchronously. // - // Defaults to 1s + // Default: 0 // // Only used when GroupID is set CommitInterval time.Duration + // PartitionWatchInterval indicates how often a reader checks for partition changes. + // If a reader sees a partition change (such as a partition add) it will rebalance the group + // picking up new partitions. + // + // Default: 5s + // + // Only used when GroupID is set and WatchPartitionChanges is set. + PartitionWatchInterval time.Duration + + // WatchForPartitionChanges is used to inform kafka-go that a consumer group should be + // polling the brokers and rebalancing if any partition changes happen to the topic. + WatchPartitionChanges bool + // SessionTimeout optionally sets the length of time that may pass without a heartbeat // before the coordinator considers the consumer dead and initiates a rebalance. // @@ -891,6 +378,12 @@ type ReaderConfig struct { // Only used when GroupID is set RebalanceTimeout time.Duration + // JoinGroupBackoff optionally sets the length of time to wait between re-joining + // the consumer group after an error. + // + // Default: 5s + JoinGroupBackoff time.Duration + // RetentionTime optionally sets the length of time the consumer group will be saved // by the broker // @@ -899,6 +392,27 @@ type ReaderConfig struct { // Only used when GroupID is set RetentionTime time.Duration + // StartOffset determines from whence the consumer group should begin + // consuming when it finds a partition without a committed offset. If + // non-zero, it must be set to one of FirstOffset or LastOffset. + // + // Default: FirstOffset + // + // Only used when GroupID is set + StartOffset int64 + + // BackoffDelayMin optionally sets the smallest amount of time the reader will wait before + // polling for new messages + // + // Default: 100ms + ReadBackoffMin time.Duration + + // BackoffDelayMax optionally sets the maximum amount of time the reader will wait before + // polling for new messages + // + // Default: 1s + ReadBackoffMax time.Duration + // If not nil, specifies a logger used to report internal changes within the // reader. Logger *log.Logger @@ -906,13 +420,65 @@ type ReaderConfig struct { // ErrorLogger is the logger used to report errors. If nil, the reader falls // back to using Logger instead. ErrorLogger *log.Logger + + // IsolationLevel controls the visibility of transactional records. + // ReadUncommitted makes all records visible. With ReadCommitted only + // non-transactional and committed records are visible. + IsolationLevel IsolationLevel + + // Limit of how many attempts will be made before delivering the error. + // + // The default is to try 3 times. + MaxAttempts int +} + +// Validate method validates ReaderConfig properties. 
+func (config *ReaderConfig) Validate() error { + + if len(config.Brokers) == 0 { + return errors.New("cannot create a new kafka reader with an empty list of broker addresses") + } + + if len(config.Topic) == 0 { + return errors.New("cannot create a new kafka reader with an empty topic") + } + + if config.Partition < 0 || config.Partition >= math.MaxInt32 { + return errors.New(fmt.Sprintf("partition number out of bounds: %d", config.Partition)) + } + + if config.MinBytes < 0 { + return errors.New(fmt.Sprintf("invalid negative minimum batch size (min = %d)", config.MinBytes)) + } + + if config.MaxBytes < 0 { + return errors.New(fmt.Sprintf("invalid negative maximum batch size (max = %d)", config.MaxBytes)) + } + + if config.GroupID != "" && config.Partition != 0 { + return errors.New("either Partition or GroupID may be specified, but not both") + } + + if config.MinBytes > config.MaxBytes { + return errors.New(fmt.Sprintf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)) + } + + if config.ReadBackoffMax < 0 { + return errors.New(fmt.Sprintf("ReadBackoffMax out of bounds: %d", config.ReadBackoffMax)) + } + + if config.ReadBackoffMin < 0 { + return errors.New(fmt.Sprintf("ReadBackoffMin out of bounds: %d", config.ReadBackoffMin)) + } + + return nil } // ReaderStats is a data structure returned by a call to Reader.Stats that exposes // details about the behavior of the reader. type ReaderStats struct { Dials int64 `metric:"kafka.reader.dial.count" type:"counter"` - Fetches int64 `metric:"kafak.reader.fetch.count" type:"counter"` // typo here, but I'm reluctant to fix it + Fetches int64 `metric:"kafka.reader.fetch.count" type:"counter"` Messages int64 `metric:"kafka.reader.message.count" type:"counter"` Bytes int64 `metric:"kafka.reader.message.bytes" type:"counter"` Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"` @@ -936,6 +502,12 @@ type ReaderStats struct { ClientID string `tag:"client_id"` Topic string `tag:"topic"` Partition string `tag:"partition"` + + // The original `Fetches` field had a typo where the metric name was called + // "kafak..." instead of "kafka...", in order to offer time to fix monitors + // that may be relying on this mistake we are temporarily introducing this + // field. + DeprecatedFetchesWithTypo int64 `metric:"kafak.reader.fetch.count" type:"counter"` } // readerStats is a struct that contains statistics on a reader. @@ -958,54 +530,19 @@ type readerStats struct { } // NewReader creates and returns a new Reader configured with config. +// The offset is initialized to FirstOffset. 
func NewReader(config ReaderConfig) *Reader { - if len(config.Brokers) == 0 { - panic("cannot create a new kafka reader with an empty list of broker addresses") - } - - if len(config.Topic) == 0 { - panic("cannot create a new kafka reader with an empty topic") - } - - if config.Partition < 0 || config.Partition >= math.MaxInt32 { - panic(fmt.Sprintf("partition number out of bounds: %d", config.Partition)) - } - if config.MinBytes > config.MaxBytes { - panic(fmt.Sprintf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes)) - } - - if config.MinBytes < 0 { - panic(fmt.Sprintf("invalid negative minimum batch size (min = %d)", config.MinBytes)) - } - - if config.MaxBytes < 0 { - panic(fmt.Sprintf("invalid negative maximum batch size (max = %d)", config.MaxBytes)) - } - - if config.GroupID != "" && config.Partition != 0 { - panic("either Partition or GroupID may be specified, but not both") + if err := config.Validate(); err != nil { + panic(err) } if config.GroupID != "" { - if config.HeartbeatInterval < 0 || (config.HeartbeatInterval/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("HeartbeatInterval out of bounds: %d", config.HeartbeatInterval)) - } - - if config.SessionTimeout < 0 || (config.SessionTimeout/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("SessionTimeout out of bounds: %d", config.SessionTimeout)) - } - - if config.RebalanceTimeout < 0 || (config.RebalanceTimeout/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("RebalanceTimeout out of bounds: %d", config.RebalanceTimeout)) - } - - if config.RetentionTime < 0 || (config.RetentionTime/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("RetentionTime out of bounds: %d", config.RetentionTime)) - } - - if config.CommitInterval < 0 || (config.CommitInterval/time.Millisecond) >= math.MaxInt32 { - panic(fmt.Sprintf("CommitInterval out of bounds: %d", config.CommitInterval)) + if len(config.GroupBalancers) == 0 { + config.GroupBalancers = []GroupBalancer{ + RangeGroupBalancer{}, + RoundRobinGroupBalancer{}, + } } } @@ -1029,26 +566,26 @@ func NewReader(config ReaderConfig) *Reader { config.ReadLagInterval = 1 * time.Minute } - if config.HeartbeatInterval == 0 { - config.HeartbeatInterval = defaultHeartbeatInterval + if config.ReadBackoffMin == 0 { + config.ReadBackoffMin = defaultReadBackoffMin } - if config.SessionTimeout == 0 { - config.SessionTimeout = defaultSessionTimeout + if config.ReadBackoffMax == 0 { + config.ReadBackoffMax = defaultReadBackoffMax } - if config.RebalanceTimeout == 0 { - config.RebalanceTimeout = defaultRebalanceTimeout - } - - if config.RetentionTime == 0 { - config.RetentionTime = defaultRetentionTime + if config.ReadBackoffMax < config.ReadBackoffMin { + panic(fmt.Errorf("ReadBackoffMax %d smaller than ReadBackoffMin %d", config.ReadBackoffMax, config.ReadBackoffMin)) } if config.QueueCapacity == 0 { config.QueueCapacity = 100 } + if config.MaxAttempts == 0 { + config.MaxAttempts = 3 + } + // when configured as a consumer group; stats should report a partition of -1 readerStatsPartition := config.Partition if config.GroupID != "" { @@ -1067,10 +604,9 @@ func NewReader(config ReaderConfig) *Reader { config: config, msgs: make(chan readerMessage, config.QueueCapacity), cancel: func() {}, - done: make(chan struct{}), - commits: make(chan commitRequest), + commits: make(chan commitRequest, config.QueueCapacity), stop: stop, - offset: firstOffset, + offset: FirstOffset, stctx: stctx, stats: &readerStats{ dialTime: makeSummary(), @@ 
-1082,11 +618,33 @@ func NewReader(config ReaderConfig) *Reader { // once when the reader is created. partition: strconv.Itoa(readerStatsPartition), }, - version: version, - offsetStash: offsetStash{}, + version: version, } - go r.run() + if r.useConsumerGroup() { + r.done = make(chan struct{}) + cg, err := NewConsumerGroup(ConsumerGroupConfig{ + ID: r.config.GroupID, + Brokers: r.config.Brokers, + Dialer: r.config.Dialer, + Topics: []string{r.config.Topic}, + GroupBalancers: r.config.GroupBalancers, + HeartbeatInterval: r.config.HeartbeatInterval, + PartitionWatchInterval: r.config.PartitionWatchInterval, + WatchPartitionChanges: r.config.WatchPartitionChanges, + SessionTimeout: r.config.SessionTimeout, + RebalanceTimeout: r.config.RebalanceTimeout, + JoinGroupBackoff: r.config.JoinGroupBackoff, + RetentionTime: r.config.RetentionTime, + StartOffset: r.config.StartOffset, + Logger: r.config.Logger, + ErrorLogger: r.config.ErrorLogger, + }) + if err != nil { + panic(err) + } + go r.run(cg) + } return r } @@ -1110,17 +668,10 @@ func (r *Reader) Close() error { r.stop() r.join.Wait() - if r.useConsumerGroup() { - // gracefully attempt to leave the consumer group on close - if generationID, membershipID := r.membership(); generationID > 0 && membershipID != "" { - if conn, err := r.coordinator(); err == nil { - _ = r.leaveGroup(conn) - } - } + if r.done != nil { + <-r.done } - <-r.done - if !closed { close(r.msgs) } @@ -1218,12 +769,11 @@ func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error { } var errch <-chan error - var sync = r.useSyncCommits() var creq = commitRequest{ commits: makeCommits(msgs...), } - if sync { + if r.useSyncCommits() { ch := make(chan error, 1) errch, creq.errch = ch, ch } @@ -1238,7 +788,7 @@ func (r *Reader) CommitMessages(ctx context.Context, msgs ...Message) error { return io.ErrClosedPipe } - if !sync { + if !r.useSyncCommits() { return nil } @@ -1306,10 +856,10 @@ func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) { select { case off := <-offch: switch cur := r.Offset(); { - case cur == firstOffset: + case cur == FirstOffset: lag = off.last - off.first - case cur == lastOffset: + case cur == LastOffset: lag = 0 default: @@ -1323,7 +873,8 @@ func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) { return } -// Offset returns the current offset of the reader. +// Offset returns the current absolute offset of the reader, or -1 +// if r is backed by a consumer group. func (r *Reader) Offset() int64 { if r.useConsumerGroup() { return -1 @@ -1338,7 +889,8 @@ func (r *Reader) Offset() int64 { return offset } -// Lag returns the lag of the last message returned by ReadMessage. +// Lag returns the lag of the last message returned by ReadMessage, or -1 +// if r is backed by a consumer group. func (r *Reader) Lag() int64 { if r.useConsumerGroup() { return -1 @@ -1351,12 +903,13 @@ func (r *Reader) Lag() int64 { } // SetOffset changes the offset from which the next batch of messages will be -// read. -// -// Setting the offset ot -1 means to seek to the first offset. -// Setting the offset to -2 means to seek to the last offset. +// read. The method fails with io.ErrClosedPipe if the reader has already been closed. // -// The method fails with io.ErrClosedPipe if the reader has already been closed. +// From version 0.2.0, FirstOffset and LastOffset can be used to indicate the first +// or last available offset in the partition. 
Please note while -1 and -2 were accepted +// to indicate the first or last offset in previous versions, the meanings of the numbers +// were swapped in 0.2.0 to match the meanings in other libraries and the Kafka protocol +// specification. func (r *Reader) SetOffset(offset int64) error { if r.useConsumerGroup() { return errNotAvailableWithGroup @@ -1385,6 +938,38 @@ func (r *Reader) SetOffset(offset int64) error { return err } +// SetOffsetAt changes the offset from which the next batch of messages will be +// read given the timestamp t. +// +// The method fails if the unable to connect partition leader, or unable to read the offset +// given the ts, or if the reader has been closed. +func (r *Reader) SetOffsetAt(ctx context.Context, t time.Time) error { + r.mutex.Lock() + if r.closed { + r.mutex.Unlock() + return io.ErrClosedPipe + } + r.mutex.Unlock() + + for _, broker := range r.config.Brokers { + conn, err := r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition) + if err != nil { + continue + } + + deadline, _ := ctx.Deadline() + conn.SetDeadline(deadline) + offset, err := conn.ReadOffset(t) + conn.Close() + if err != nil { + return err + } + + return r.SetOffset(offset) + } + return fmt.Errorf("error setting offset for timestamp %+v", t) +} + // Stats returns a snapshot of the reader stats since the last time the method // was called, or since the reader was created if it is called for the first // time. @@ -1393,7 +978,7 @@ func (r *Reader) SetOffset(offset int64) error { // call Stats on a kafka reader and report the metrics to a stats collection // system. func (r *Reader) Stats() ReaderStats { - return ReaderStats{ + stats := ReaderStats{ Dials: r.stats.dials.snapshot(), Fetches: r.stats.fetches.snapshot(), Messages: r.stats.messages.snapshot(), @@ -1417,6 +1002,9 @@ func (r *Reader) Stats() ReaderStats { Topic: r.config.Topic, Partition: r.stats.partition, } + // TODO: remove when we get rid of the deprecated field. + stats.DeprecatedFetchesWithTypo = stats.Fetches + return stats } func (r *Reader) withLogger(do func(*log.Logger)) { @@ -1487,18 +1075,22 @@ func (r *Reader) start(offsetsByPartition map[int]int64) { defer join.Done() (&reader{ - dialer: r.config.Dialer, - logger: r.config.Logger, - errorLogger: r.config.ErrorLogger, - brokers: r.config.Brokers, - topic: r.config.Topic, - partition: partition, - minBytes: r.config.MinBytes, - maxBytes: r.config.MaxBytes, - maxWait: r.config.MaxWait, - version: r.version, - msgs: r.msgs, - stats: r.stats, + dialer: r.config.Dialer, + logger: r.config.Logger, + errorLogger: r.config.ErrorLogger, + brokers: r.config.Brokers, + topic: r.config.Topic, + partition: partition, + minBytes: r.config.MinBytes, + maxBytes: r.config.MaxBytes, + maxWait: r.config.MaxWait, + backoffDelayMin: r.config.ReadBackoffMin, + backoffDelayMax: r.config.ReadBackoffMax, + version: r.version, + msgs: r.msgs, + stats: r.stats, + isolationLevel: r.config.IsolationLevel, + maxAttempts: r.config.MaxAttempts, }).run(ctx, offset) }(ctx, partition, offset, &r.join) } @@ -1508,18 +1100,22 @@ func (r *Reader) start(offsetsByPartition map[int]int64) { // used as an way to asynchronously fetch messages while the main program reads // them using the high level reader API. 
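For context, a minimal usage sketch of the exported API touched by this change (FirstOffset, StartOffset, the synchronous-by-default CommitInterval). The broker address, topic, and group name are placeholders, and the Message fields accessed are assumed from kafka-go's exported Message type; this is an illustration, not the project's own code:

package main

import (
	"context"
	"fmt"
	"log"

	kafka "github.com/segmentio/kafka-go"
)

func main() {
	r := kafka.NewReader(kafka.ReaderConfig{
		Brokers:     []string{"localhost:9092"}, // placeholder broker
		GroupID:     "example-group",            // placeholder group
		Topic:       "example-topic",            // placeholder topic
		MinBytes:    1,
		MaxBytes:    10e6,
		StartOffset: kafka.FirstOffset, // used when the group has no committed offset
		// CommitInterval: 0 (the default) commits synchronously.
	})
	defer r.Close()

	ctx := context.Background()
	for i := 0; i < 10; i++ {
		// With a GroupID set, ReadMessage also commits the message's offset.
		m, err := r.ReadMessage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("offset %d: %s\n", m.Offset, string(m.Value))
	}
}

SetOffset and SetOffsetAt, shown above, apply only to readers without a GroupID, since group-managed readers take their offsets from the coordinator.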
type reader struct { - dialer *Dialer - logger *log.Logger - errorLogger *log.Logger - brokers []string - topic string - partition int - minBytes int - maxBytes int - maxWait time.Duration - version int64 - msgs chan<- readerMessage - stats *readerStats + dialer *Dialer + logger *log.Logger + errorLogger *log.Logger + brokers []string + topic string + partition int + minBytes int + maxBytes int + maxWait time.Duration + backoffDelayMin time.Duration + backoffDelayMax time.Duration + version int64 + msgs chan<- readerMessage + stats *readerStats + isolationLevel IsolationLevel + maxAttempts int } type readerMessage struct { @@ -1530,9 +1126,6 @@ type readerMessage struct { } func (r *reader) run(ctx context.Context, offset int64) { - const backoffDelayMin = 100 * time.Millisecond - const backoffDelayMax = 1 * time.Second - // This is the reader's main loop, it only ends if the context is canceled // and will keep attempting to reader messages otherwise. // @@ -1544,7 +1137,7 @@ func (r *reader) run(ctx context.Context, offset int64) { // on a Read call after reading the first error. for attempt := 0; true; attempt++ { if attempt != 0 { - if !sleep(ctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) { + if !sleep(ctx, backoff(attempt, r.backoffDelayMin, r.backoffDelayMax)) { return } } @@ -1568,7 +1161,7 @@ func (r *reader) run(ctx context.Context, offset int64) { // Wait 4 attempts before reporting the first errors, this helps // mitigate situations where the kafka server is temporarily // unavailable. - if attempt >= 3 { + if attempt >= r.maxAttempts { r.sendError(ctx, err) } else { r.stats.errors.observe(1) @@ -1591,7 +1184,7 @@ func (r *reader) run(ctx context.Context, offset int64) { errcount := 0 readLoop: for { - if !sleep(ctx, backoff(errcount, backoffDelayMin, backoffDelayMax)) { + if !sleep(ctx, backoff(errcount, r.backoffDelayMin, r.backoffDelayMax)) { conn.Close() return } @@ -1599,7 +1192,17 @@ func (r *reader) run(ctx context.Context, offset int64) { switch offset, err = r.read(ctx, offset, conn); err { case nil: errcount = 0 + case UnknownTopicOrPartition: + r.withErrorLogger(func(log *log.Logger) { + log.Printf("failed to read from current broker for partition %d of %s at offset %d, topic or parition not found on this broker, %v", r.partition, r.topic, offset, r.brokers) + }) + conn.Close() + + // The next call to .initialize will re-establish a connection to the proper + // topic/partition broker combo. + r.stats.rebalances.observe(1) + break readLoop case NotLeaderForPartition: r.withErrorLogger(func(log *log.Logger) { log.Printf("failed to read from current broker for partition %d of %s at offset %d, not the leader", r.partition, r.topic, offset) @@ -1615,7 +1218,7 @@ func (r *reader) run(ctx context.Context, offset int64) { case RequestTimedOut: // Timeout on the kafka side, this can be safely retried. errcount = 0 - r.withErrorLogger(func(log *log.Logger) { + r.withLogger(func(log *log.Logger) { log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d", r.partition, r.topic, offset) }) r.stats.timeouts.observe(1) @@ -1656,6 +1259,13 @@ func (r *reader) run(ctx context.Context, offset int64) { conn.Close() return + case errUnknownCodec: + // The compression codec is either unsupported or has not been + // imported. This is a fatal error b/c the reader cannot + // proceed. 
+ r.sendError(ctx, err) + break readLoop + default: if _, ok := err.(Error); ok { r.sendError(ctx, err) @@ -1677,8 +1287,7 @@ func (r *reader) run(ctx context.Context, offset int64) { func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) { for i := 0; i != len(r.brokers) && conn == nil; i++ { var broker = r.brokers[i] - var first int64 - var last int64 + var first, last int64 t0 := time.Now() conn, err = r.dialer.DialLeader(ctx, "tcp", broker, r.topic, r.partition) @@ -1697,10 +1306,10 @@ func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, star } switch { - case offset == firstOffset: + case offset == FirstOffset: offset = first - case offset == lastOffset: + case offset == LastOffset: offset = last case offset < first: @@ -1711,7 +1320,7 @@ func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, star log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, offset) }) - if start, err = conn.Seek(offset, 1); err != nil { + if start, err = conn.Seek(offset, SeekAbsolute); err != nil { conn.Close() conn = nil break @@ -1730,7 +1339,11 @@ func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, err t0 := time.Now() conn.SetReadDeadline(t0.Add(r.maxWait)) - batch := conn.ReadBatch(r.minBytes, r.maxBytes) + batch := conn.ReadBatchWith(ReadBatchConfig{ + MinBytes: r.minBytes, + MaxBytes: r.maxBytes, + IsolationLevel: r.isolationLevel, + }) highWaterMark := batch.HighWaterMark() t1 := time.Now() @@ -1752,7 +1365,7 @@ func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, err } if msg, err = batch.ReadMessage(); err != nil { - err = batch.Close() + batch.Close() break } @@ -1761,7 +1374,7 @@ func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, err r.stats.bytes.observe(n) if err = r.sendMessage(ctx, msg, highWaterMark); err != nil { - err = batch.Close() + batch.Close() break } @@ -1782,7 +1395,7 @@ func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, err return offset, err } -func (r *reader) readOffsets(conn *Conn) (first int64, last int64, err error) { +func (r *reader) readOffsets(conn *Conn) (first, last int64, err error) { conn.SetDeadline(time.Now().Add(10 * time.Second)) return conn.ReadOffsets() } @@ -1821,12 +1434,12 @@ func (r *reader) withErrorLogger(do func(*log.Logger)) { // extractTopics returns the unique list of topics represented by the set of // provided members -func extractTopics(members []memberGroupMetadata) []string { +func extractTopics(members []GroupMember) []string { var visited = map[string]struct{}{} var topics []string for _, member := range members { - for _, topic := range member.Metadata.Topics { + for _, topic := range member.Topics { if _, seen := visited[topic]; seen { continue } diff --git a/vendor/github.com/segmentio/kafka-go/rungroup.go b/vendor/github.com/segmentio/kafka-go/rungroup.go deleted file mode 100644 index b8cd704f0..000000000 --- a/vendor/github.com/segmentio/kafka-go/rungroup.go +++ /dev/null @@ -1,61 +0,0 @@ -package kafka - -import ( - "context" - "sync" -) - -// runGroup is a collection of goroutines working together. If any one goroutine -// stops, then all goroutines will be stopped. 
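The retry loops above wait for backoff(attempt, r.backoffDelayMin, r.backoffDelayMax) between attempts. The body of that function is not part of this diff; the sketch below shows only the usual shape of such a helper, an exponentially growing delay clamped between the configured bounds, and is an assumption rather than kafka-go's actual implementation:

package main

import (
	"fmt"
	"time"
)

// backoffDelay grows the wait exponentially with the attempt number and clamps
// it between min and max, so retries neither hammer the broker nor stall forever.
func backoffDelay(attempt int, min, max time.Duration) time.Duration {
	d := min
	for i := 0; i < attempt && d < max; i++ {
		d *= 2
	}
	if d > max {
		d = max
	}
	return d
}

func main() {
	for attempt := 0; attempt < 6; attempt++ {
		fmt.Println(attempt, backoffDelay(attempt, 100*time.Millisecond, time.Second))
	}
	// prints 100ms, 200ms, 400ms, 800ms, 1s, 1s
}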
-// -// A zero runGroup is valid -type runGroup struct { - initOnce sync.Once - - ctx context.Context - cancel context.CancelFunc - - wg sync.WaitGroup -} - -func (r *runGroup) init() { - if r.cancel == nil { - r.ctx, r.cancel = context.WithCancel(context.Background()) - } -} - -func (r *runGroup) WithContext(ctx context.Context) *runGroup { - ctx, cancel := context.WithCancel(ctx) - return &runGroup{ - ctx: ctx, - cancel: cancel, - } -} - -// Wait blocks until all function calls have returned. -func (r *runGroup) Wait() { - r.wg.Wait() -} - -// Stop stops the goroutines and waits for them to complete -func (r *runGroup) Stop() { - r.initOnce.Do(r.init) - r.cancel() - r.Wait() -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (r *runGroup) Go(f func(stop <-chan struct{})) { - r.initOnce.Do(r.init) - - r.wg.Add(1) - go func() { - defer r.wg.Done() - defer r.cancel() - - f(r.ctx.Done()) - }() -} diff --git a/vendor/github.com/segmentio/kafka-go/sasl/sasl.go b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go new file mode 100644 index 000000000..eb07f64fb --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/sasl/sasl.go @@ -0,0 +1,44 @@ +package sasl + +import "context" + +// Mechanism implements the SASL state machine for a particular mode of +// authentication. It is used by the kafka.Dialer to perform the SASL +// handshake. +// +// A Mechanism must be re-usable and safe for concurrent access by multiple +// goroutines. +type Mechanism interface { + // Name returns the identifier for this SASL mechanism. This string will be + // passed to the SASL handshake request and much match one of the mechanisms + // supported by Kafka. + Name() string + + // Start begins SASL authentication. It returns an authentication state + // machine and "initial response" data (if required by the selected + // mechanism). A non-nil error causes the client to abort the authentication + // attempt. + // + // A nil ir value is different from a zero-length value. The nil value + // indicates that the selected mechanism does not use an initial response, + // while a zero-length value indicates an empty initial response, which must + // be sent to the server. + Start(ctx context.Context) (sess StateMachine, ir []byte, err error) +} + +// StateMachine implements the SASL challenge/response flow for a single SASL +// handshake. A StateMachine will be created by the Mechanism per connection, +// so it does not need to be safe for concurrent access by multiple goroutines. +// +// Once the StateMachine is created by the Mechanism, the caller loops by +// passing the server's response into Next and then sending Next's returned +// bytes to the server. Eventually either Next will indicate that the +// authentication has been successfully completed via the done return value, or +// it will indicate that the authentication failed by returning a non-nil error. +type StateMachine interface { + // Next continues challenge-response authentication. A non-nil error + // indicates that the client should abort the authentication attempt. If + // the client has been successfully authenticated, then the done return + // value will be true. 
+ Next(ctx context.Context, challenge []byte) (done bool, response []byte, err error) +} diff --git a/vendor/github.com/segmentio/kafka-go/saslauthenticate.go b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go new file mode 100644 index 000000000..ad1292918 --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/saslauthenticate.go @@ -0,0 +1,54 @@ +package kafka + +import ( + "bufio" +) + +type saslAuthenticateRequestV0 struct { + // Data holds the SASL payload + Data []byte +} + +func (t saslAuthenticateRequestV0) size() int32 { + return sizeofBytes(t.Data) +} + +func (t *saslAuthenticateRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + return readBytes(r, sz, &t.Data) +} + +func (t saslAuthenticateRequestV0) writeTo(wb *writeBuffer) { + wb.writeBytes(t.Data) +} + +type saslAuthenticateResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + ErrorMessage string + + Data []byte +} + +func (t saslAuthenticateResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofString(t.ErrorMessage) + sizeofBytes(t.Data) +} + +func (t saslAuthenticateResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeString(t.ErrorMessage) + wb.writeBytes(t.Data) +} + +func (t *saslAuthenticateResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + if remain, err = readString(r, remain, &t.ErrorMessage); err != nil { + return + } + if remain, err = readBytes(r, remain, &t.Data); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/saslhandshake.go b/vendor/github.com/segmentio/kafka-go/saslhandshake.go new file mode 100644 index 000000000..e83a1cb4b --- /dev/null +++ b/vendor/github.com/segmentio/kafka-go/saslhandshake.go @@ -0,0 +1,53 @@ +package kafka + +import ( + "bufio" +) + +// saslHandshakeRequestV0 implements the format for V0 and V1 SASL +// requests (they are identical) +type saslHandshakeRequestV0 struct { + // Mechanism holds the SASL Mechanism chosen by the client. 
+ Mechanism string +} + +func (t saslHandshakeRequestV0) size() int32 { + return sizeofString(t.Mechanism) +} + +func (t *saslHandshakeRequestV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + return readString(r, sz, &t.Mechanism) +} + +func (t saslHandshakeRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.Mechanism) +} + +// saslHandshakeResponseV0 implements the format for V0 and V1 SASL +// responses (they are identical) +type saslHandshakeResponseV0 struct { + // ErrorCode holds response error code + ErrorCode int16 + + // Array of mechanisms enabled in the server + EnabledMechanisms []string +} + +func (t saslHandshakeResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofStringArray(t.EnabledMechanisms) +} + +func (t saslHandshakeResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeStringArray(t.EnabledMechanisms) +} + +func (t *saslHandshakeResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { + return + } + if remain, err = readStringArray(r, remain, &t.EnabledMechanisms); err != nil { + return + } + return +} diff --git a/vendor/github.com/segmentio/kafka-go/sizeof.go b/vendor/github.com/segmentio/kafka-go/sizeof.go index 87feb1a14..4c6d5b4cd 100644 --- a/vendor/github.com/segmentio/kafka-go/sizeof.go +++ b/vendor/github.com/segmentio/kafka-go/sizeof.go @@ -48,6 +48,13 @@ func sizeofString(s string) int32 { return 2 + int32(len(s)) } +func sizeofNullableString(s *string) int32 { + if s == nil { + return 2 + } + return sizeofString(*s) +} + func sizeofBool(_ bool) int32 { return 1 } diff --git a/vendor/github.com/segmentio/kafka-go/strategy.go b/vendor/github.com/segmentio/kafka-go/strategy.go deleted file mode 100644 index 5237c1cba..000000000 --- a/vendor/github.com/segmentio/kafka-go/strategy.go +++ /dev/null @@ -1,187 +0,0 @@ -package kafka - -import "sort" - -// strategy encapsulates the client side rebalancing logic -type strategy interface { - // ProtocolName of strategy - ProtocolName() string - - // ProtocolMetadata provides the strategy an opportunity to embed custom - // UserData into the metadata. - // - // Will be used by JoinGroup to begin the consumer group handshake. 
- // - // See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-JoinGroupRequest - GroupMetadata(topics []string) (groupMetadata, error) - - // DefineMemberships returns which members will be consuming - // which topic partitions - AssignGroups(members []memberGroupMetadata, partitions []Partition) memberGroupAssignments -} - -var ( - // allStrategies the kafka-go Reader supports - allStrategies = []strategy{ - rangeStrategy{}, - roundrobinStrategy{}, - } -) - -// rangeStrategy groups consumers by partition -// -// Example: 5 partitions, 2 consumers -// C0: [0, 1, 2] -// C1: [3, 4] -// -// Example: 6 partitions, 3 consumers -// C0: [0, 1] -// C1: [2, 3] -// C2: [4, 5] -// -type rangeStrategy struct{} - -func (r rangeStrategy) ProtocolName() string { - return "range" -} - -func (r rangeStrategy) GroupMetadata(topics []string) (groupMetadata, error) { - return groupMetadata{ - Version: 1, - Topics: topics, - }, nil -} - -func (r rangeStrategy) AssignGroups(members []memberGroupMetadata, topicPartitions []Partition) memberGroupAssignments { - groupAssignments := memberGroupAssignments{} - membersByTopic := findMembersByTopic(members) - - for topic, members := range membersByTopic { - partitions := findPartitions(topic, topicPartitions) - partitionCount := len(partitions) - memberCount := len(members) - - rangeSize := partitionCount / memberCount - if partitionCount%memberCount != 0 { - rangeSize++ - } - - for memberIndex, member := range members { - assignmentsByTopic, ok := groupAssignments[member.MemberID] - if !ok { - assignmentsByTopic = map[string][]int32{} - groupAssignments[member.MemberID] = assignmentsByTopic - } - - for partitionIndex, partition := range partitions { - if (partitionIndex / rangeSize) == memberIndex { - assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) - } - } - } - } - - return groupAssignments -} - -// roundrobinStrategy divides partitions evenly among consumers -// -// Example: 5 partitions, 2 consumers -// C0: [0, 2, 4] -// C1: [1, 3] -// -// Example: 6 partitions, 3 consumers -// C0: [0, 3] -// C1: [1, 4] -// C2: [2, 5] -// -type roundrobinStrategy struct{} - -func (r roundrobinStrategy) ProtocolName() string { - return "roundrobin" -} - -func (r roundrobinStrategy) GroupMetadata(topics []string) (groupMetadata, error) { - return groupMetadata{ - Version: 1, - Topics: topics, - }, nil -} - -func (r roundrobinStrategy) AssignGroups(members []memberGroupMetadata, topicPartitions []Partition) memberGroupAssignments { - groupAssignments := memberGroupAssignments{} - membersByTopic := findMembersByTopic(members) - for topic, members := range membersByTopic { - partitionIDs := findPartitions(topic, topicPartitions) - memberCount := len(members) - - for memberIndex, member := range members { - assignmentsByTopic, ok := groupAssignments[member.MemberID] - if !ok { - assignmentsByTopic = map[string][]int32{} - groupAssignments[member.MemberID] = assignmentsByTopic - } - - for partitionIndex, partition := range partitionIDs { - if (partitionIndex % memberCount) == memberIndex { - assignmentsByTopic[topic] = append(assignmentsByTopic[topic], partition) - } - } - } - } - - return groupAssignments -} - -// findPartitions extracts the partition ids associated with the topic from the -// list of Partitions provided -func findPartitions(topic string, partitions []Partition) []int32 { - var ids []int32 - for _, partition := range partitions { - if partition.Topic == topic { - ids = append(ids, 
int32(partition.ID)) - } - } - return ids -} - -// findMembersByTopic groups the memberGroupMetadata by topic -func findMembersByTopic(members []memberGroupMetadata) map[string][]memberGroupMetadata { - membersByTopic := map[string][]memberGroupMetadata{} - for _, member := range members { - for _, topic := range member.Metadata.Topics { - membersByTopic[topic] = append(membersByTopic[topic], member) - } - } - - // normalize ordering of members to enabling grouping across topics by partitions - // - // Want: - // C0 [T0/P0, T1/P0] - // C1 [T0/P1, T1/P1] - // - // Not: - // C0 [T0/P0, T1/P1] - // C1 [T0/P1, T1/P0] - // - // Even though the later is still round robin, the partitions are crossed - // - for _, members := range membersByTopic { - sort.Slice(members, func(i, j int) bool { - return members[i].MemberID < members[j].MemberID - }) - } - - return membersByTopic -} - -// findStrategy returns the strategy with the specified protocolName from the -// slice provided -func findStrategy(protocolName string, strategies []strategy) (strategy, bool) { - for _, strategy := range strategies { - if strategy.ProtocolName() == protocolName { - return strategy, true - } - } - return nil, false -} diff --git a/vendor/github.com/segmentio/kafka-go/syncgroup.go b/vendor/github.com/segmentio/kafka-go/syncgroup.go index 241a9ea9c..fd9f135b2 100644 --- a/vendor/github.com/segmentio/kafka-go/syncgroup.go +++ b/vendor/github.com/segmentio/kafka-go/syncgroup.go @@ -5,9 +5,6 @@ import ( "bytes" ) -// memberGroupAssignments holds MemberID => topic => partitions -type memberGroupAssignments map[string]map[string][]int32 - type groupAssignment struct { Version int16 Topics map[string][]int32 @@ -24,16 +21,16 @@ func (t groupAssignment) size() int32 { return sz + sizeofBytes(t.UserData) } -func (t groupAssignment) writeTo(w *bufio.Writer) { - writeInt16(w, t.Version) - writeInt32(w, int32(len(t.Topics))) +func (t groupAssignment) writeTo(wb *writeBuffer) { + wb.writeInt16(t.Version) + wb.writeInt32(int32(len(t.Topics))) for topic, partitions := range t.Topics { - writeString(w, topic) - writeInt32Array(w, partitions) + wb.writeString(topic) + wb.writeInt32Array(partitions) } - writeBytes(w, t.UserData) + wb.writeBytes(t.UserData) } func (t *groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err error) { @@ -60,13 +57,11 @@ func (t *groupAssignment) readFrom(r *bufio.Reader, size int) (remain int, err e func (t groupAssignment) bytes() []byte { buf := bytes.NewBuffer(nil) - w := bufio.NewWriter(buf) - t.writeTo(w) - w.Flush() + t.writeTo(&writeBuffer{w: buf}) return buf.Bytes() } -type syncGroupRequestGroupAssignmentV1 struct { +type syncGroupRequestGroupAssignmentV0 struct { // MemberID assigned by the group coordinator MemberID string @@ -76,17 +71,17 @@ type syncGroupRequestGroupAssignmentV1 struct { MemberAssignments []byte } -func (t syncGroupRequestGroupAssignmentV1) size() int32 { +func (t syncGroupRequestGroupAssignmentV0) size() int32 { return sizeofString(t.MemberID) + sizeofBytes(t.MemberAssignments) } -func (t syncGroupRequestGroupAssignmentV1) writeTo(w *bufio.Writer) { - writeString(w, t.MemberID) - writeBytes(w, t.MemberAssignments) +func (t syncGroupRequestGroupAssignmentV0) writeTo(wb *writeBuffer) { + wb.writeString(t.MemberID) + wb.writeBytes(t.MemberAssignments) } -type syncGroupRequestV1 struct { +type syncGroupRequestV0 struct { // GroupID holds the unique group identifier GroupID string @@ -96,29 +91,24 @@ type syncGroupRequestV1 struct { // MemberID assigned by the group 
coordinator MemberID string - GroupAssignments []syncGroupRequestGroupAssignmentV1 + GroupAssignments []syncGroupRequestGroupAssignmentV0 } -func (t syncGroupRequestV1) size() int32 { +func (t syncGroupRequestV0) size() int32 { return sizeofString(t.GroupID) + sizeofInt32(t.GenerationID) + sizeofString(t.MemberID) + sizeofArray(len(t.GroupAssignments), func(i int) int32 { return t.GroupAssignments[i].size() }) } -func (t syncGroupRequestV1) writeTo(w *bufio.Writer) { - writeString(w, t.GroupID) - writeInt32(w, t.GenerationID) - writeString(w, t.MemberID) - writeArray(w, len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(w) }) +func (t syncGroupRequestV0) writeTo(wb *writeBuffer) { + wb.writeString(t.GroupID) + wb.writeInt32(t.GenerationID) + wb.writeString(t.MemberID) + wb.writeArray(len(t.GroupAssignments), func(i int) { t.GroupAssignments[i].writeTo(wb) }) } -type syncGroupResponseV1 struct { - // ThrottleTimeMS holds the duration in milliseconds for which the request - // was throttled due to quota violation (Zero if the request did not violate - // any quota) - ThrottleTimeMS int32 - +type syncGroupResponseV0 struct { // ErrorCode holds response error code ErrorCode int16 @@ -128,23 +118,18 @@ type syncGroupResponseV1 struct { MemberAssignments []byte } -func (t syncGroupResponseV1) size() int32 { - return sizeofInt32(t.ThrottleTimeMS) + - sizeofInt16(t.ErrorCode) + +func (t syncGroupResponseV0) size() int32 { + return sizeofInt16(t.ErrorCode) + sizeofBytes(t.MemberAssignments) } -func (t syncGroupResponseV1) writeTo(w *bufio.Writer) { - writeInt32(w, t.ThrottleTimeMS) - writeInt16(w, t.ErrorCode) - writeBytes(w, t.MemberAssignments) +func (t syncGroupResponseV0) writeTo(wb *writeBuffer) { + wb.writeInt16(t.ErrorCode) + wb.writeBytes(t.MemberAssignments) } -func (t *syncGroupResponseV1) readFrom(r *bufio.Reader, sz int) (remain int, err error) { - if remain, err = readInt32(r, sz, &t.ThrottleTimeMS); err != nil { - return - } - if remain, err = readInt16(r, remain, &t.ErrorCode); err != nil { +func (t *syncGroupResponseV0) readFrom(r *bufio.Reader, sz int) (remain int, err error) { + if remain, err = readInt16(r, sz, &t.ErrorCode); err != nil { return } if remain, err = readBytes(r, remain, &t.MemberAssignments); err != nil { diff --git a/vendor/github.com/segmentio/kafka-go/write.go b/vendor/github.com/segmentio/kafka-go/write.go index c20ed6b3c..32ca49c88 100644 --- a/vendor/github.com/segmentio/kafka-go/write.go +++ b/vendor/github.com/segmentio/kafka-go/write.go @@ -1,121 +1,174 @@ package kafka import ( - "bufio" + "bytes" "encoding/binary" "fmt" + "hash/crc32" + "io" "time" ) -type writable interface { - writeTo(*bufio.Writer) +type writeBuffer struct { + w io.Writer + b [16]byte +} + +func (wb *writeBuffer) writeInt8(i int8) { + wb.b[0] = byte(i) + wb.Write(wb.b[:1]) } -func writeInt8(w *bufio.Writer, i int8) { - w.WriteByte(byte(i)) +func (wb *writeBuffer) writeInt16(i int16) { + binary.BigEndian.PutUint16(wb.b[:2], uint16(i)) + wb.Write(wb.b[:2]) } -func writeInt16(w *bufio.Writer, i int16) { - var b [2]byte - binary.BigEndian.PutUint16(b[:], uint16(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) +func (wb *writeBuffer) writeInt32(i int32) { + binary.BigEndian.PutUint32(wb.b[:4], uint32(i)) + wb.Write(wb.b[:4]) } -func writeInt32(w *bufio.Writer, i int32) { - var b [4]byte - binary.BigEndian.PutUint32(b[:], uint32(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) - w.WriteByte(b[2]) - w.WriteByte(b[3]) +func (wb *writeBuffer) writeInt64(i int64) { + 
binary.BigEndian.PutUint64(wb.b[:8], uint64(i)) + wb.Write(wb.b[:8]) +} + +func (wb *writeBuffer) writeVarInt(i int64) { + u := uint64((i << 1) ^ (i >> 63)) + n := 0 + + for u >= 0x80 && n < len(wb.b) { + wb.b[n] = byte(u) | 0x80 + u >>= 7 + n++ + } + + if n < len(wb.b) { + wb.b[n] = byte(u) + n++ + } + + wb.Write(wb.b[:n]) } -func writeInt64(w *bufio.Writer, i int64) { - var b [8]byte - binary.BigEndian.PutUint64(b[:], uint64(i)) - w.WriteByte(b[0]) - w.WriteByte(b[1]) - w.WriteByte(b[2]) - w.WriteByte(b[3]) - w.WriteByte(b[4]) - w.WriteByte(b[5]) - w.WriteByte(b[6]) - w.WriteByte(b[7]) +func (wb *writeBuffer) writeString(s string) { + wb.writeInt16(int16(len(s))) + wb.WriteString(s) } -func writeString(w *bufio.Writer, s string) { - writeInt16(w, int16(len(s))) - w.WriteString(s) +func (wb *writeBuffer) writeVarString(s string) { + wb.writeVarInt(int64(len(s))) + wb.WriteString(s) } -func writeBytes(w *bufio.Writer, b []byte) { +func (wb *writeBuffer) writeNullableString(s *string) { + if s == nil { + wb.writeInt16(-1) + } else { + wb.writeString(*s) + } +} + +func (wb *writeBuffer) writeBytes(b []byte) { n := len(b) if b == nil { n = -1 } - writeInt32(w, int32(n)) - w.Write(b) + wb.writeInt32(int32(n)) + wb.Write(b) +} + +func (wb *writeBuffer) writeVarBytes(b []byte) { + if b != nil { + wb.writeVarInt(int64(len(b))) + wb.Write(b) + } else { + //-1 is used to indicate nil key + wb.writeVarInt(-1) + } } -func writeBool(w *bufio.Writer, b bool) { +func (wb *writeBuffer) writeBool(b bool) { v := int8(0) if b { v = 1 } - writeInt8(w, v) + wb.writeInt8(v) } -func writeArrayLen(w *bufio.Writer, n int) { - writeInt32(w, int32(n)) +func (wb *writeBuffer) writeArrayLen(n int) { + wb.writeInt32(int32(n)) } -func writeArray(w *bufio.Writer, n int, f func(int)) { - writeArrayLen(w, n) - for i := 0; i != n; i++ { +func (wb *writeBuffer) writeArray(n int, f func(int)) { + wb.writeArrayLen(n) + for i := 0; i < n; i++ { f(i) } } -func writeStringArray(w *bufio.Writer, a []string) { - writeArray(w, len(a), func(i int) { writeString(w, a[i]) }) +func (wb *writeBuffer) writeVarArray(n int, f func(int)) { + wb.writeVarInt(int64(n)) + for i := 0; i < n; i++ { + f(i) + } } -func writeInt32Array(w *bufio.Writer, a []int32) { - writeArray(w, len(a), func(i int) { writeInt32(w, a[i]) }) +func (wb *writeBuffer) writeStringArray(a []string) { + wb.writeArray(len(a), func(i int) { wb.writeString(a[i]) }) } -func write(w *bufio.Writer, a interface{}) { +func (wb *writeBuffer) writeInt32Array(a []int32) { + wb.writeArray(len(a), func(i int) { wb.writeInt32(a[i]) }) +} + +func (wb *writeBuffer) write(a interface{}) { switch v := a.(type) { case int8: - writeInt8(w, v) + wb.writeInt8(v) case int16: - writeInt16(w, v) + wb.writeInt16(v) case int32: - writeInt32(w, v) + wb.writeInt32(v) case int64: - writeInt64(w, v) + wb.writeInt64(v) case string: - writeString(w, v) + wb.writeString(v) case []byte: - writeBytes(w, v) + wb.writeBytes(v) case bool: - writeBool(w, v) + wb.writeBool(v) case writable: - v.writeTo(w) + v.writeTo(wb) default: panic(fmt.Sprintf("unsupported type: %T", a)) } } -// The functions bellow are used as optimizations to avoid dynamic memory -// allocations that occur when building the data structures representing the -// kafka protocol requests. 
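// Editor's note (illustration, not part of the upstream patch): writeVarInt
// above zig-zag encodes the signed value ((i << 1) ^ (i >> 63)) and then emits
// 7-bit groups, least significant first, setting the high bit on every byte
// except the last. Worked examples:
//
//	writeVarInt(-1)  -> zig-zag 1   -> 0x01
//	writeVarInt(300) -> zig-zag 600 -> 0xD8 0x04   (600 = 0b10_0101_1000)
//
// varIntLen, defined further down in this file, mirrors the same encoding so
// that size computations agree with what is written, e.g. varIntLen(300) == 2.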
+func (wb *writeBuffer) Write(b []byte) (int, error) { + return wb.w.Write(b) +} + +func (wb *writeBuffer) WriteString(s string) (int, error) { + return io.WriteString(wb.w, s) +} + +func (wb *writeBuffer) Flush() error { + if x, ok := wb.w.(interface{ Flush() error }); ok { + return x.Flush() + } + return nil +} + +type writable interface { + writeTo(*writeBuffer) +} -func writeFetchRequestV1(w *bufio.Writer, correlationID int32, clientID string, topic string, partition int32, offset int64, minBytes int, maxBytes int, maxWait time.Duration) error { +func (wb *writeBuffer) writeFetchRequestV2(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration) error { h := requestHeader{ ApiKey: int16(fetchRequest), - ApiVersion: int16(v1), + ApiVersion: int16(v2), CorrelationID: correlationID, ClientID: clientID, } @@ -130,25 +183,119 @@ func writeFetchRequestV1(w *bufio.Writer, correlationID int32, clientID string, 8 + // offset 4 // max bytes - h.writeTo(w) - writeInt32(w, -1) // replica ID - writeInt32(w, milliseconds(maxWait)) - writeInt32(w, int32(minBytes)) + h.writeTo(wb) + wb.writeInt32(-1) // replica ID + wb.writeInt32(milliseconds(maxWait)) + wb.writeInt32(int32(minBytes)) // topic array - writeArrayLen(w, 1) - writeString(w, topic) + wb.writeArrayLen(1) + wb.writeString(topic) // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt64(w, offset) - writeInt32(w, int32(maxBytes)) + wb.writeArrayLen(1) + wb.writeInt32(partition) + wb.writeInt64(offset) + wb.writeInt32(int32(maxBytes)) - return w.Flush() + return wb.Flush() } -func writeListOffsetRequestV1(w *bufio.Writer, correlationID int32, clientID string, topic string, partition int32, time int64) error { +func (wb *writeBuffer) writeFetchRequestV5(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error { + h := requestHeader{ + ApiKey: int16(fetchRequest), + ApiVersion: int16(v5), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + 4 + // replica ID + 4 + // max wait time + 4 + // min bytes + 4 + // max bytes + 1 + // isolation level + 4 + // topic array length + sizeofString(topic) + + 4 + // partition array length + 4 + // partition + 8 + // offset + 8 + // log start offset + 4 // max bytes + + h.writeTo(wb) + wb.writeInt32(-1) // replica ID + wb.writeInt32(milliseconds(maxWait)) + wb.writeInt32(int32(minBytes)) + wb.writeInt32(int32(maxBytes)) + wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + wb.writeInt64(offset) + wb.writeInt64(int64(0)) // log start offset only used when is sent by follower + wb.writeInt32(int32(maxBytes)) + + return wb.Flush() +} + +func (wb *writeBuffer) writeFetchRequestV10(correlationID int32, clientID, topic string, partition int32, offset int64, minBytes, maxBytes int, maxWait time.Duration, isolationLevel int8) error { + h := requestHeader{ + ApiKey: int16(fetchRequest), + ApiVersion: int16(v10), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + 4 + // replica ID + 4 + // max wait time + 4 + // min bytes + 4 + // max bytes + 1 + // isolation level + 4 + // session ID + 4 + // session epoch + 4 + // topic array length + sizeofString(topic) + + 4 + // partition array length + 4 + // partition + 4 + // 
current leader epoch + 8 + // fetch offset + 8 + // log start offset + 4 + // partition max bytes + 4 // forgotten topics data + + h.writeTo(wb) + wb.writeInt32(-1) // replica ID + wb.writeInt32(milliseconds(maxWait)) + wb.writeInt32(int32(minBytes)) + wb.writeInt32(int32(maxBytes)) + wb.writeInt8(isolationLevel) // isolation level 0 - read uncommitted + wb.writeInt32(0) //FIXME + wb.writeInt32(-1) //FIXME + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + wb.writeInt32(-1) //FIXME + wb.writeInt64(offset) + wb.writeInt64(int64(0)) // log start offset only used when is sent by follower + wb.writeInt32(int32(maxBytes)) + + // forgotten topics array + wb.writeArrayLen(0) // forgotten topics not supported yet + + return wb.Flush() +} + +func (wb *writeBuffer) writeListOffsetRequestV1(correlationID int32, clientID, topic string, partition int32, time int64) error { h := requestHeader{ ApiKey: int16(listOffsetRequest), ApiVersion: int16(v1), @@ -163,42 +310,98 @@ func writeListOffsetRequestV1(w *bufio.Writer, correlationID int32, clientID str 4 + // partition 8 // time - h.writeTo(w) - writeInt32(w, -1) // replica ID + h.writeTo(wb) + wb.writeInt32(-1) // replica ID // topic array - writeArrayLen(w, 1) - writeString(w, topic) + wb.writeArrayLen(1) + wb.writeString(topic) // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt64(w, time) + wb.writeArrayLen(1) + wb.writeInt32(partition) + wb.writeInt64(time) - return w.Flush() + return wb.Flush() } -func writeProduceRequestV2(w *bufio.Writer, correlationID int32, clientID string, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) error { +func (wb *writeBuffer) writeProduceRequestV2(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, msgs ...Message) (err error) { var size int32 + var attributes int8 + var compressed *bytes.Buffer + + if codec == nil { + size = messageSetSize(msgs...) + } else { + compressed, attributes, size, err = compressMessageSet(codec, msgs...) 
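		// Editor's note: with the pre-0.11 message format used by produce v2,
		// compression works by compressing the entire message set and wrapping
		// the result as the value of a single outer message whose attributes
		// carry the codec; the wrapping happens just below, where msgs is
		// replaced by one Message holding compressed.Bytes().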
+ if err != nil { + return + } + msgs = []Message{{Value: compressed.Bytes()}} + } + + h := requestHeader{ + ApiKey: int16(produceRequest), + ApiVersion: int16(v2), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + size + + h.writeTo(wb) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + wb.writeInt32(size) + cw := &crc32Writer{table: crc32.IEEETable} for _, msg := range msgs { - size += 8 + // offset - 4 + // message size - 4 + // crc - 1 + // magic byte - 1 + // attributes - 8 + // timestamp - sizeofBytes(msg.Key) + - sizeofBytes(msg.Value) + wb.writeMessage(msg.Offset, attributes, msg.Time, msg.Key, msg.Value, cw) + } + + releaseBuffer(compressed) + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV3(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, msgs ...Message) (err error) { + var size int32 + var attributes int16 + var compressed *bytes.Buffer + + if codec == nil { + size = recordBatchSize(msgs...) + } else { + compressed, attributes, size, err = compressRecordBatch(codec, msgs...) + if err != nil { + return + } } h := requestHeader{ ApiKey: int16(produceRequest), - ApiVersion: int16(v2), + ApiVersion: int16(v3), CorrelationID: correlationID, ClientID: clientID, } + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + 2 + // required acks 4 + // timeout 4 + // topic array length @@ -208,41 +411,317 @@ func writeProduceRequestV2(w *bufio.Writer, correlationID int32, clientID string 4 + // message set size size - h.writeTo(w) - writeInt16(w, requiredAcks) // required acks - writeInt32(w, milliseconds(timeout)) + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) // topic array - writeArrayLen(w, 1) - writeString(w, topic) + wb.writeArrayLen(1) + wb.writeString(topic) // partition array - writeArrayLen(w, 1) - writeInt32(w, partition) - writeInt32(w, size) + wb.writeArrayLen(1) + wb.writeInt32(partition) + + wb.writeInt32(size) + baseTime := msgs[0].Time + lastTime := msgs[len(msgs)-1].Time + + if compressed != nil { + wb.writeRecordBatch(attributes, size, len(msgs), baseTime, lastTime, func(wb *writeBuffer) { + wb.Write(compressed.Bytes()) + }) + releaseBuffer(compressed) + } else { + wb.writeRecordBatch(attributes, size, len(msgs), baseTime, lastTime, func(wb *writeBuffer) { + for i, msg := range msgs { + wb.writeRecord(0, msgs[0].Time, int64(i), msg) + } + }) + } - const magicByte = 1 - const attributes = 0 + return wb.Flush() +} + +func (wb *writeBuffer) writeProduceRequestV7(codec CompressionCodec, correlationID int32, clientID, topic string, partition int32, timeout time.Duration, requiredAcks int16, transactionalID *string, msgs ...Message) (err error) { + var size int32 + var attributes int16 + var compressed *bytes.Buffer + + if codec == nil { + size = recordBatchSize(msgs...) + } else { + compressed, attributes, size, err = compressRecordBatch(codec, msgs...) 
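		// Editor's note: produce v3 switches to the 0.11+ record batch format.
		// Here only the records section is compressed; the fixed batch header
		// stays uncompressed, which is why compressRecordBatch reports the size
		// as recordBatchHeaderSize plus the compressed length, and why msgs is
		// not rewrapped the way the v2 path above does it.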
+ if err != nil { + return + } + } + h := requestHeader{ + ApiKey: int16(produceRequest), + ApiVersion: int16(v7), + CorrelationID: correlationID, + ClientID: clientID, + } + h.Size = (h.size() - 4) + + sizeofNullableString(transactionalID) + + 2 + // required acks + 4 + // timeout + 4 + // topic array length + sizeofString(topic) + // topic + 4 + // partition array length + 4 + // partition + 4 + // message set size + size + + h.writeTo(wb) + wb.writeNullableString(transactionalID) + wb.writeInt16(requiredAcks) // required acks + wb.writeInt32(milliseconds(timeout)) + + // topic array + wb.writeArrayLen(1) + wb.writeString(topic) + + // partition array + wb.writeArrayLen(1) + wb.writeInt32(partition) + + wb.writeInt32(size) + baseTime := msgs[0].Time + lastTime := msgs[len(msgs)-1].Time + + if compressed != nil { + wb.writeRecordBatch(attributes, size, len(msgs), baseTime, lastTime, func(wb *writeBuffer) { + wb.Write(compressed.Bytes()) + }) + releaseBuffer(compressed) + } else { + wb.writeRecordBatch(attributes, size, len(msgs), baseTime, lastTime, func(wb *writeBuffer) { + for i, msg := range msgs { + wb.writeRecord(0, msgs[0].Time, int64(i), msg) + } + }) + } + + return wb.Flush() +} + +func (wb *writeBuffer) writeRecordBatch(attributes int16, size int32, count int, baseTime, lastTime time.Time, write func(*writeBuffer)) { + var ( + baseTimestamp = timestamp(baseTime) + lastTimestamp = timestamp(lastTime) + lastOffsetDelta = int32(count - 1) + producerID = int64(-1) // default producer id for now + producerEpoch = int16(-1) // default producer epoch for now + baseSequence = int32(-1) // default base sequence + recordCount = int32(count) // record count + writerBackup = wb.w + ) + + // dry run to compute the checksum + cw := &crc32Writer{table: crc32.MakeTable(crc32.Castagnoli)} + wb.w = cw + cw.writeInt16(attributes) // attributes, timestamp type 0 - create time, not part of a transaction, no control messages + cw.writeInt32(lastOffsetDelta) + cw.writeInt64(baseTimestamp) + cw.writeInt64(lastTimestamp) + cw.writeInt64(producerID) + cw.writeInt16(producerEpoch) + cw.writeInt32(baseSequence) + cw.writeInt32(recordCount) + write(wb) + wb.w = writerBackup + + // actual write to the output buffer + wb.writeInt64(int64(0)) + wb.writeInt32(int32(size - 12)) // 12 = batch length + base offset sizes + wb.writeInt32(-1) // partition leader epoch + wb.writeInt8(2) // magic byte + wb.writeInt32(int32(cw.crc32)) + + wb.writeInt16(attributes) + wb.writeInt32(lastOffsetDelta) + wb.writeInt64(baseTimestamp) + wb.writeInt64(lastTimestamp) + wb.writeInt64(producerID) + wb.writeInt16(producerEpoch) + wb.writeInt32(baseSequence) + wb.writeInt32(recordCount) + write(wb) +} + +func compressMessageSet(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int8, size int32, err error) { + compressed = acquireBuffer() + compressor := codec.NewWriter(compressed) + wb := &writeBuffer{w: compressor} + cw := &crc32Writer{table: crc32.IEEETable} + + for offset, msg := range msgs { + wb.writeMessage(int64(offset), 0, msg.Time, msg.Key, msg.Value, cw) + } + + if err = compressor.Close(); err != nil { + releaseBuffer(compressed) + return + } + + attributes = codec.Code() + size = messageSetSize(Message{Value: compressed.Bytes()}) + return +} + +func compressRecordBatch(codec CompressionCodec, msgs ...Message) (compressed *bytes.Buffer, attributes int16, size int32, err error) { + compressed = acquireBuffer() + compressor := codec.NewWriter(compressed) + wb := &writeBuffer{w: compressor} + + for 
i, msg := range msgs { + wb.writeRecord(0, msgs[0].Time, int64(i), msg) + } + + if err = compressor.Close(); err != nil { + releaseBuffer(compressed) + return + } + + attributes = int16(codec.Code()) + size = recordBatchHeaderSize + int32(compressed.Len()) + return +} + +func (wb *writeBuffer) writeMessage(offset int64, attributes int8, time time.Time, key, value []byte, cw *crc32Writer) { + const magicByte = 1 // compatible with kafka 0.10.0.0+ + + timestamp := timestamp(time) + size := messageSize(key, value) + + // dry run to compute the checksum + cw.crc32 = 0 + cw.writeInt8(magicByte) + cw.writeInt8(attributes) + cw.writeInt64(timestamp) + cw.writeBytes(key) + cw.writeBytes(value) + + // actual write to the output buffer + wb.writeInt64(offset) + wb.writeInt32(size) + wb.writeInt32(int32(cw.crc32)) + wb.writeInt8(magicByte) + wb.writeInt8(attributes) + wb.writeInt64(timestamp) + wb.writeBytes(key) + wb.writeBytes(value) +} + +// Messages with magic >2 are called records. This method writes messages using message format 2. +func (wb *writeBuffer) writeRecord(attributes int8, baseTime time.Time, offset int64, msg Message) { + timestampDelta := msg.Time.Sub(baseTime) + offsetDelta := int64(offset) + + wb.writeVarInt(int64(recordSize(&msg, timestampDelta, offsetDelta))) + wb.writeInt8(attributes) + wb.writeVarInt(int64(milliseconds(timestampDelta))) + wb.writeVarInt(offsetDelta) + + wb.writeVarBytes(msg.Key) + wb.writeVarBytes(msg.Value) + wb.writeVarArray(len(msg.Headers), func(i int) { + h := &msg.Headers[i] + wb.writeVarString(h.Key) + wb.writeVarBytes(h.Value) + }) +} + +func varIntLen(i int64) int { + u := uint64((i << 1) ^ (i >> 63)) // zig-zag encoding + n := 0 + + for u >= 0x80 { + u >>= 7 + n++ + } + + return n + 1 +} + +func varBytesLen(b []byte) int { + return varIntLen(int64(len(b))) + len(b) +} + +func varStringLen(s string) int { + return varIntLen(int64(len(s))) + len(s) +} + +func varArrayLen(n int, f func(int) int) int { + size := varIntLen(int64(n)) + for i := 0; i < n; i++ { + size += f(i) + } + return size +} + +func messageSize(key, value []byte) int32 { + return 4 + // crc + 1 + // magic byte + 1 + // attributes + 8 + // timestamp + sizeofBytes(key) + + sizeofBytes(value) +} + +func messageSetSize(msgs ...Message) (size int32) { for _, msg := range msgs { - timestamp := timestamp(msg.Time) - crc32 := crc32OfMessage(magicByte, attributes, timestamp, msg.Key, msg.Value) - size := 4 + // crc + size += 8 + // offset + 4 + // message size + 4 + // crc 1 + // magic byte 1 + // attributes 8 + // timestamp sizeofBytes(msg.Key) + sizeofBytes(msg.Value) + } + return +} + +func recordSize(msg *Message, timestampDelta time.Duration, offsetDelta int64) int { + return 1 + // attributes + varIntLen(int64(milliseconds(timestampDelta))) + + varIntLen(offsetDelta) + + varBytesLen(msg.Key) + + varBytesLen(msg.Value) + + varArrayLen(len(msg.Headers), func(i int) int { + h := &msg.Headers[i] + return varStringLen(h.Key) + varBytesLen(h.Value) + }) +} - writeInt64(w, msg.Offset) - writeInt32(w, int32(size)) - writeInt32(w, int32(crc32)) - writeInt8(w, magicByte) - writeInt8(w, attributes) - writeInt64(w, timestamp) - writeBytes(w, msg.Key) - writeBytes(w, msg.Value) +const recordBatchHeaderSize int32 = 0 + + 8 + // base offset + 4 + // batch length + 4 + // partition leader epoch + 1 + // magic + 4 + // crc + 2 + // attributes + 4 + // last offset delta + 8 + // first timestamp + 8 + // max timestamp + 8 + // producer id + 2 + // producer epoch + 4 + // base sequence + 4 // msg count + 
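// Editor's note (illustration, not part of the upstream patch): the header
// fields above add up to 61 bytes, the fixed header of the Kafka v2 record
// batch format. The batch length field excludes the base offset (8 bytes) and
// the batch length itself (4 bytes), which is why writeRecordBatch writes
// size-12 there, and the CRC is a CRC-32C (Castagnoli) computed over
// everything after the crc field, hence the "dry run" pass through crc32Writer
// before the real write.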
+func recordBatchSize(msgs ...Message) (size int32) { + size = recordBatchHeaderSize + baseTime := msgs[0].Time + + for i := range msgs { + msg := &msgs[i] + msz := recordSize(msg, msg.Time.Sub(baseTime), int64(i)) + size += int32(msz + varIntLen(int64(msz))) } - return w.Flush() + return } diff --git a/vendor/github.com/segmentio/kafka-go/writer.go b/vendor/github.com/segmentio/kafka-go/writer.go index 375ee5e0c..d1c97c1c4 100644 --- a/vendor/github.com/segmentio/kafka-go/writer.go +++ b/vendor/github.com/segmentio/kafka-go/writer.go @@ -2,8 +2,10 @@ package kafka import ( "context" + "errors" "fmt" "io" + "log" "math/rand" "sort" "sync" @@ -72,6 +74,12 @@ type WriterConfig struct { // The default is to use a target batch size of 100 messages. BatchSize int + // Limit the maximum size of a request in bytes before being sent to + // a partition. + // + // The default is to use a kafka default value of 1048576. + BatchBytes int + // Time limit on how often incomplete message batches will be flushed to // kafka. // @@ -106,6 +114,18 @@ type WriterConfig struct { // whether the messages were written to kafka. Async bool + // CompressionCodec set the codec to be used to compress Kafka messages. + // Note that messages are allowed to overwrite the compression codec individually. + CompressionCodec + + // If not nil, specifies a logger used to report internal changes within the + // writer. + Logger *log.Logger + + // ErrorLogger is the logger used to report errors. If nil, the writer falls + // back to using Logger instead. + ErrorLogger *log.Logger + newPartitionWriter func(partition int, config WriterConfig, stats *writerStats) partitionWriter } @@ -119,11 +139,12 @@ type WriterStats struct { Rebalances int64 `metric:"kafka.writer.rebalance.count" type:"counter"` Errors int64 `metric:"kafka.writer.error.count" type:"counter"` - DialTime DurationStats `metric:"kafka.writer.dial.seconds"` - WriteTime DurationStats `metric:"kafka.writer.write.seconds"` - WaitTime DurationStats `metric:"kafka.writer.wait.seconds"` - Retries SummaryStats `metric:"kafka.writer.retries.count"` - BatchSize SummaryStats `metric:"kafka.writer.batch.size"` + DialTime DurationStats `metric:"kafka.writer.dial.seconds"` + WriteTime DurationStats `metric:"kafka.writer.write.seconds"` + WaitTime DurationStats `metric:"kafka.writer.wait.seconds"` + Retries SummaryStats `metric:"kafka.writer.retries.count"` + BatchSize SummaryStats `metric:"kafka.writer.batch.size"` + BatchBytes SummaryStats `metric:"kafka.writer.batch.bytes"` MaxAttempts int64 `metric:"kafka.writer.attempts.max" type:"gauge"` MaxBatchSize int64 `metric:"kafka.writer.batch.max" type:"gauge"` @@ -146,27 +167,39 @@ type WriterStats struct { // This is easily accomplished by always allocating this struct directly, (i.e. using a pointer to the struct). // See https://golang.org/pkg/sync/atomic/#pkg-note-BUG type writerStats struct { - dials counter - writes counter - messages counter - bytes counter - rebalances counter - errors counter - dialTime summary - writeTime summary - waitTime summary - retries summary - batchSize summary + dials counter + writes counter + messages counter + bytes counter + rebalances counter + errors counter + dialTime summary + writeTime summary + waitTime summary + retries summary + batchSize summary + batchSizeBytes summary } -// NewWriter creates and returns a new Writer configured with config. -func NewWriter(config WriterConfig) *Writer { +// Validate method validates WriterConfig properties. 
+func (config *WriterConfig) Validate() error { + if len(config.Brokers) == 0 { - panic("cannot create a kafka writer with an empty list of brokers") + return errors.New("cannot create a kafka writer with an empty list of brokers") } if len(config.Topic) == 0 { - panic("cannot create a kafka writer with an empty topic") + return errors.New("cannot create a kafka writer with an empty topic") + } + + return nil +} + +// NewWriter creates and returns a new Writer configured with config. +func NewWriter(config WriterConfig) *Writer { + + if err := config.Validate(); err != nil { + panic(err) } if config.Dialer == nil { @@ -195,6 +228,11 @@ func NewWriter(config WriterConfig) *Writer { config.BatchSize = 100 } + if config.BatchBytes == 0 { + // 1048576 == 1MB which is the Kafka default. + config.BatchBytes = 1048576 + } + if config.BatchTimeout == 0 { config.BatchTimeout = 1 * time.Second } @@ -235,6 +273,15 @@ func NewWriter(config WriterConfig) *Writer { // blocks until all messages have been written, or until the maximum number of // attempts was reached. // +// When sending synchronously and the writer's batch size is configured to be +// greater than 1, this method blocks until either a full batch can be assembled +// or the batch timeout is reached. The batch size and timeouts are evaluated +// per partition, so the choice of Balancer can also influence the flushing +// behavior. For example, the Hash balancer will require on average N * batch +// size messages to trigger a flush where N is the number of partitions. The +// best way to achieve good batching behavior is to share one Writer amongst +// multiple go routines. +// // When the method returns an error, there's no way to know yet which messages // have succeeded of failed. // @@ -248,9 +295,11 @@ func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error { return nil } - var res = make(chan error, len(msgs)) var err error - + var res chan error + if !w.config.Async { + res = make(chan error, len(msgs)) + } t0 := time.Now() for attempt := 0; attempt < w.config.MaxAttempts; attempt++ { @@ -261,7 +310,15 @@ func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error { return io.ErrClosedPipe } - for _, msg := range msgs { + for i, msg := range msgs { + if int(msg.message(nil).size()) > w.config.BatchBytes { + err := MessageTooLargeError{ + Message: msg, + Remaining: msgs[i+1:], + } + w.mutex.RUnlock() + return err + } select { case w.msgs <- writerMessage{ msg: msg, @@ -320,10 +377,7 @@ func (w *Writer) WriteMessages(ctx context.Context, msgs ...Message) error { break } } - - t1 := time.Now() - w.stats.writeTime.observeDuration(t1.Sub(t0)) - + w.stats.writeTime.observeDuration(time.Since(t0)) return err } @@ -347,6 +401,7 @@ func (w *Writer) Stats() WriterStats { WaitTime: w.stats.waitTime.snapshotDuration(), Retries: w.stats.retries.snapshot(), BatchSize: w.stats.batchSize.snapshot(), + BatchBytes: w.stats.batchSizeBytes.snapshot(), MaxAttempts: int64(w.config.MaxAttempts), MaxBatchSize: int64(w.config.BatchSize), BatchTimeout: w.config.BatchTimeout, @@ -429,8 +484,9 @@ func (w *Writer) run() { if err == nil { err = fmt.Errorf("failed to find any partitions for topic %s", w.config.Topic) } - - wm.res <- err + if wm.res != nil { + wm.res <- &writerError{msg: wm.msg, err: err} + } } case <-ticker.C: @@ -492,31 +548,39 @@ type partitionWriter interface { } type writer struct { - brokers []string - topic string - partition int - requiredAcks int - batchSize int - batchTimeout time.Duration - writeTimeout 
time.Duration - dialer *Dialer - msgs chan writerMessage - join sync.WaitGroup - stats *writerStats + brokers []string + topic string + partition int + requiredAcks int + batchSize int + maxMessageBytes int + batchTimeout time.Duration + writeTimeout time.Duration + dialer *Dialer + msgs chan writerMessage + join sync.WaitGroup + stats *writerStats + codec CompressionCodec + logger *log.Logger + errorLogger *log.Logger } func newWriter(partition int, config WriterConfig, stats *writerStats) *writer { w := &writer{ - brokers: config.Brokers, - topic: config.Topic, - partition: partition, - requiredAcks: config.RequiredAcks, - batchSize: config.BatchSize, - batchTimeout: config.BatchTimeout, - writeTimeout: config.WriteTimeout, - dialer: config.Dialer, - msgs: make(chan writerMessage, config.QueueCapacity), - stats: stats, + brokers: config.Brokers, + topic: config.Topic, + partition: partition, + requiredAcks: config.RequiredAcks, + batchSize: config.BatchSize, + maxMessageBytes: config.BatchBytes, + batchTimeout: config.BatchTimeout, + writeTimeout: config.WriteTimeout, + dialer: config.Dialer, + msgs: make(chan writerMessage, config.QueueCapacity), + stats: stats, + codec: config.CompressionCodec, + logger: config.Logger, + errorLogger: config.ErrorLogger, } w.join.Add(1) go w.run() @@ -532,17 +596,34 @@ func (w *writer) messages() chan<- writerMessage { return w.msgs } +func (w *writer) withLogger(do func(*log.Logger)) { + if w.logger != nil { + do(w.logger) + } +} + +func (w *writer) withErrorLogger(do func(*log.Logger)) { + if w.errorLogger != nil { + do(w.errorLogger) + } else { + w.withLogger(do) + } +} + func (w *writer) run() { defer w.join.Done() - ticker := time.NewTicker(w.batchTimeout / 10) - defer ticker.Stop() + batchTimer := time.NewTimer(0) + <-batchTimer.C + batchTimerRunning := false + defer batchTimer.Stop() var conn *Conn var done bool var batch = make([]Message, 0, w.batchSize) var resch = make([](chan<- error), 0, w.batchSize) - var lastFlushAt = time.Now() + var lastMsg writerMessage + var batchSizeBytes int defer func() { if conn != nil { @@ -552,28 +633,60 @@ func (w *writer) run() { for !done { var mustFlush bool - + // lstMsg gets set when the next message would put the maxMessageBytes over the limit. + // If a lstMsg exists we need to add it to the batch so we don't lose it. + if len(lastMsg.msg.Value) != 0 { + batch = append(batch, lastMsg.msg) + if lastMsg.res != nil { + resch = append(resch, lastMsg.res) + } + batchSizeBytes += int(lastMsg.msg.message(nil).size()) + lastMsg = writerMessage{} + if !batchTimerRunning { + batchTimer.Reset(w.batchTimeout) + batchTimerRunning = true + } + } select { case wm, ok := <-w.msgs: if !ok { done, mustFlush = true, true } else { + if int(wm.msg.message(nil).size())+batchSizeBytes > w.maxMessageBytes { + // If the size of the current message puts us over the maxMessageBytes limit, + // store the message but don't send it in this batch. 
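					// Editor's note: the held message is not dropped; it is
					// parked in lastMsg, the current batch is flushed, and the
					// next loop iteration seeds the new batch with it (see the
					// len(lastMsg.msg.Value) check at the top of the loop).
					// Messages that are individually larger than BatchBytes
					// never get this far: WriteMessages rejects them up front
					// with MessageTooLargeError.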
+ mustFlush = true + lastMsg = wm + break + } batch = append(batch, wm.msg) - resch = append(resch, wm.res) - mustFlush = len(batch) >= w.batchSize + if wm.res != nil { + resch = append(resch, wm.res) + } + batchSizeBytes += int(wm.msg.message(nil).size()) + mustFlush = len(batch) >= w.batchSize || batchSizeBytes >= w.maxMessageBytes + } + if !batchTimerRunning { + batchTimer.Reset(w.batchTimeout) + batchTimerRunning = true } - case now := <-ticker.C: - mustFlush = now.Sub(lastFlushAt) > w.batchTimeout + case <-batchTimer.C: + mustFlush = true + batchTimerRunning = false } if mustFlush { - lastFlushAt = time.Now() - + w.stats.batchSizeBytes.observe(int64(batchSizeBytes)) + if batchTimerRunning { + if stopped := batchTimer.Stop(); !stopped { + <-batchTimer.C + } + batchTimerRunning = false + } if len(batch) == 0 { continue } - var err error if conn, err = w.write(conn, batch, resch); err != nil { if conn != nil { @@ -581,7 +694,6 @@ func (w *writer) run() { conn = nil } } - for i := range batch { batch[i] = Message{} } @@ -589,9 +701,9 @@ func (w *writer) run() { for i := range resch { resch[i] = nil } - batch = batch[:0] resch = resch[:0] + batchSizeBytes = 0 } } } @@ -615,6 +727,9 @@ func (w *writer) write(conn *Conn, batch []Message, resch [](chan<- error)) (ret if conn == nil { if conn, err = w.dial(); err != nil { w.stats.errors.observe(1) + w.withErrorLogger(func(logger *log.Logger) { + logger.Printf("error dialing kafka brokers for topic %s (partition %d): %s", w.topic, w.partition, err) + }) for i, res := range resch { res <- &writerError{msg: batch[i], err: err} } @@ -624,9 +739,11 @@ func (w *writer) write(conn *Conn, batch []Message, resch [](chan<- error)) (ret t0 := time.Now() conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) - - if _, err = conn.WriteMessages(batch...); err != nil { + if _, err = conn.WriteCompressedMessages(w.codec, batch...); err != nil { w.stats.errors.observe(1) + w.withErrorLogger(func(logger *log.Logger) { + logger.Printf("error writing messages to %s (partition %d): %s", w.topic, w.partition, err) + }) for i, res := range resch { res <- &writerError{msg: batch[i], err: err} } @@ -639,7 +756,6 @@ func (w *writer) write(conn *Conn, batch []Message, resch [](chan<- error)) (ret res <- nil } } - t1 := time.Now() w.stats.waitTime.observeDuration(t1.Sub(t0)) w.stats.batchSize.observe(int64(len(batch))) diff --git a/vendor/k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go b/vendor/k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go deleted file mode 100644 index 2f482c7d6..000000000 --- a/vendor/k8s.io/client-go/1.5/kubernetes/typed/extensions/v1beta1/fake/fake_thirdpartyresource.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fake - -import ( - api "k8s.io/client-go/1.5/pkg/api" - unversioned "k8s.io/client-go/1.5/pkg/api/unversioned" - v1beta1 "k8s.io/client-go/1.5/pkg/apis/extensions/v1beta1" - labels "k8s.io/client-go/1.5/pkg/labels" - watch "k8s.io/client-go/1.5/pkg/watch" - testing "k8s.io/client-go/1.5/testing" -) - -// FakeThirdPartyResources implements ThirdPartyResourceInterface -type FakeThirdPartyResources struct { - Fake *FakeExtensions -} - -var thirdpartyresourcesResource = unversioned.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "thirdpartyresources"} - -func (c *FakeThirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(thirdpartyresourcesResource, thirdPartyResource), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) Delete(name string, options *api.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) - return err -} - -func (c *FakeThirdPartyResources) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(thirdpartyresourcesResource, listOptions) - - _, err := c.Fake.Invokes(action, &v1beta1.ThirdPartyResourceList{}) - return err -} - -func (c *FakeThirdPartyResources) Get(name string) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(thirdpartyresourcesResource, name), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -} - -func (c *FakeThirdPartyResources) List(opts api.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(thirdpartyresourcesResource, opts), &v1beta1.ThirdPartyResourceList{}) - if obj == nil { - return nil, err - } - - label := opts.LabelSelector - if label == nil { - label = labels.Everything() - } - list := &v1beta1.ThirdPartyResourceList{} - for _, item := range obj.(*v1beta1.ThirdPartyResourceList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *FakeThirdPartyResources) Watch(opts api.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(thirdpartyresourcesResource, opts)) -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *FakeThirdPartyResources) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(thirdpartyresourcesResource, name, data, subresources...), &v1beta1.ThirdPartyResource{}) - if obj == nil { - return nil, err - } - return obj.(*v1beta1.ThirdPartyResource), err -}
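Editor's note: the writer changes above add BatchBytes, a CompressionCodec hook, and optional Logger/ErrorLogger fields to WriterConfig. A minimal usage sketch of those knobs follows; the broker address and topic are placeholders, and the compression codec is left unset since choosing one depends on which codec subpackages the build enables.

package main

import (
	"context"
	"log"
	"os"

	"github.com/segmentio/kafka-go"
)

func main() {
	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:     []string{"localhost:9092"}, // placeholder broker
		Topic:       "example-topic",            // placeholder topic
		BatchSize:   100,     // flush once 100 messages are queued per partition...
		BatchBytes:  1048576, // ...or once a batch reaches 1 MB (the Kafka default)
		Async:       false,   // block until the batch is acknowledged
		Logger:      log.New(os.Stdout, "kafka writer: ", log.LstdFlags),
		ErrorLogger: log.New(os.Stderr, "kafka writer error: ", log.LstdFlags),
	})
	defer w.Close()

	// A message larger than BatchBytes is rejected with MessageTooLargeError.
	if err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("key"), Value: []byte("value")},
	); err != nil {
		log.Fatal(err)
	}
}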