From 6fddb7c62b39547e4c978093cabbe94c2856e8a6 Mon Sep 17 00:00:00 2001
From: Anthony Woods <awoods@raintank.io>
Date: Thu, 7 Jun 2018 10:43:42 +0800
Subject: [PATCH] set jaeger-client-go source path and update vendor

---
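Note: the Gopkg.toml/Gopkg.lock hunks below pin the dep `source` for
github.com/uber/jaeger-client-go and github.com/uber/jaeger-lib to the
jaegertracing GitHub organisation, and the large number of vendor/ deletions
(sarama examples, mocks, tools, vagrant scripts, and similar non-imported
packages) is consistent with dep pruning unused packages when the vendor tree
was regenerated. A rough reproduction sketch, assuming the standard dep
workflow of the time — the exact invocation used is not recorded in this
patch:

    # edit Gopkg.toml to add the source constraint/override, then:
    dep ensure -update github.com/uber/jaeger-client-go github.com/uber/jaeger-lib
    # older dep releases exposed pruning as a separate step:
    dep prune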
 Gopkg.lock                                    |   4 +-
 Gopkg.toml                                    |   5 +
 .../Shopify/sarama/.github/CONTRIBUTING.md    |  31 --
 .../Shopify/sarama/.github/ISSUE_TEMPLATE.md  |  20 --
 .../Shopify/sarama/examples/README.md         |   9 -
 .../sarama/examples/http_server/.gitignore    |   2 -
 .../sarama/examples/http_server/README.md     |   7 -
 .../examples/http_server/http_server.go       | 247 --------------
 .../examples/http_server/http_server_test.go  | 109 ------
 .../github.com/Shopify/sarama/mocks/README.md |  13 -
 .../Shopify/sarama/mocks/async_producer.go    | 174 ----------
 .../sarama/mocks/async_producer_test.go       | 132 --------
 .../Shopify/sarama/mocks/consumer.go          | 315 ------------------
 .../Shopify/sarama/mocks/consumer_test.go     | 249 --------------
 .../github.com/Shopify/sarama/mocks/mocks.go  |  48 ---
 .../Shopify/sarama/mocks/sync_producer.go     | 157 ---------
 .../sarama/mocks/sync_producer_test.go        | 250 --------------
 .../github.com/Shopify/sarama/tools/README.md |  10 -
 .../tools/kafka-console-consumer/.gitignore   |   2 -
 .../tools/kafka-console-consumer/README.md    |  29 --
 .../kafka-console-consumer.go                 | 145 --------
 .../.gitignore                                |   2 -
 .../kafka-console-partitionconsumer/README.md |  28 --
 .../kafka-console-partitionconsumer.go        | 102 ------
 .../tools/kafka-console-producer/.gitignore   |   2 -
 .../tools/kafka-console-producer/README.md    |  34 --
 .../kafka-console-producer.go                 | 124 -------
 .../Shopify/sarama/vagrant/boot_cluster.sh    |  22 --
 .../Shopify/sarama/vagrant/create_topics.sh   |   8 -
 .../Shopify/sarama/vagrant/halt_cluster.sh    |  15 -
 .../Shopify/sarama/vagrant/install_cluster.sh |  49 ---
 .../Shopify/sarama/vagrant/kafka.conf         |   9 -
 .../Shopify/sarama/vagrant/provision.sh       |  15 -
 .../Shopify/sarama/vagrant/run_toxiproxy.sh   |  22 --
 .../Shopify/sarama/vagrant/server.properties  | 127 -------
 .../Shopify/sarama/vagrant/setup_services.sh  |  29 --
 .../Shopify/sarama/vagrant/toxiproxy.conf     |   6 -
 .../Shopify/sarama/vagrant/zookeeper.conf     |   7 -
 .../sarama/vagrant/zookeeper.properties       |  36 --
 vendor/github.com/docker/docker/vendor.conf   |   1 +
 vendor/github.com/pierrec/lz4/lz4c/main.go    | 108 ------
 .../pierrec/xxHash/xxHash64/example_test.go   |  21 --
 .../pierrec/xxHash/xxHash64/xxHash64.go       | 249 --------------
 .../pierrec/xxHash/xxHash64/xxHash64_test.go  | 150 ---------
 .../github.com/pierrec/xxHash/xxhsum/main.go  |  44 ---
 .../cmd/metrics-bench/metrics-bench.go        |  20 --
 .../cmd/metrics-example/metrics-example.go    | 154 ---------
 .../go-metrics/cmd/never-read/never-read.go   |  22 --
 .../github.com/rcrowley/go-metrics/exp/exp.go | 156 ---------
 .../rcrowley/go-metrics/librato/client.go     | 102 ------
 .../rcrowley/go-metrics/librato/librato.go    | 235 -------------
 .../rcrowley/go-metrics/stathat/stathat.go    |  69 ----
 52 files changed, 9 insertions(+), 3917 deletions(-)
 delete mode 100644 vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md
 delete mode 100644 vendor/github.com/Shopify/sarama/examples/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
 delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
 delete mode 100644 vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/consumer_test.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/mocks.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer.go
 delete mode 100644 vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
 delete mode 100644 vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/create_topics.sh
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/halt_cluster.sh
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh
 delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/kafka.conf
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/provision.sh
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
 delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/server.properties
 delete mode 100755 vendor/github.com/Shopify/sarama/vagrant/setup_services.sh
 delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf
 delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf
 delete mode 100644 vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties
 delete mode 100644 vendor/github.com/pierrec/lz4/lz4c/main.go
 delete mode 100644 vendor/github.com/pierrec/xxHash/xxHash64/example_test.go
 delete mode 100644 vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go
 delete mode 100644 vendor/github.com/pierrec/xxHash/xxHash64/xxHash64_test.go
 delete mode 100644 vendor/github.com/pierrec/xxHash/xxhsum/main.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/exp/exp.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/librato/client.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/librato/librato.go
 delete mode 100644 vendor/github.com/rcrowley/go-metrics/stathat/stathat.go

diff --git a/Gopkg.lock b/Gopkg.lock
index d265365168..d3c6456213 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -628,12 +628,14 @@
     "utils"
   ]
   revision = "ff3efa227b65e419701a4f48985379ca106a89e7"
+  source = "https://github.com/jaegertracing/jaeger-client-go.git"
   version = "v2.11.0"
 
 [[projects]]
   name = "github.com/uber/jaeger-lib"
   packages = ["metrics"]
   revision = "c48167d9cae5887393dd5e61efd06a4a48b7fbb3"
+  source = "https://github.com/jaegertracing/jaeger-lib.git"
   version = "v1.2.1"
 
 [[projects]]
@@ -762,6 +764,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "4310514673bc8e3d2d703fd1762e333e3453c7e895cbf08fcc919822414ed31a"
+  inputs-digest = "600fd0a5e3fcfefdd092618105d06142b5ce1aa8f384f0ac660a2da67fa6aba8"
   solver-name = "gps-cdcl"
   solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 0df5ddd8b9..c5b54dbde3 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -135,6 +135,11 @@
 
 [[constraint]]
   name = "github.com/uber/jaeger-client-go"
+  source = "https://github.com/jaegertracing/jaeger-client-go.git"
+
+[[override]]
+  name = "github.com/uber/jaeger-lib"
+  source = "https://github.com/jaegertracing/jaeger-lib.git"
 
 [[constraint]]
   name = "gopkg.in/macaron.v1"
diff --git a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md b/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md
deleted file mode 100644
index b0f107cbc7..0000000000
--- a/vendor/github.com/Shopify/sarama/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Contributing
-
-Contributions are always welcome, both reporting issues and submitting pull requests!
-
-### Reporting issues
-
-Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
-
-- What SHA of Sarama are you running? If this is not the latest SHA on the master branch, please try if the problem persists with the latest version.
-- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
-- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
-
-Also, please include the following information about your environment, so we can help you faster:
-
-- What version of Kafka are you using?
-- What version of Go are you using?
-- What are the values of your Producer/Consumer/Client configuration?
-
-
-### Submitting pull requests
-
-We will gladly accept bug fixes, or additions to this library. Please fork this library, commit & push your changes, and open a pull request. Because this library is in production use by many people and applications, we code review all additions. To make the review process go as smooth as possible, please consider the following.
-
-- If you plan to work on something major, please open an issue to discuss the design first.
-- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
-- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
-- Run [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) to detect any suspicious constructs in your code that could be bugs.
-- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`.You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
-- You may also want to run [golint](https://github.com/golang/lint) as well to detect style problems.
-- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
-- Make sure your code is supported by all the Go versions we support. You can rely on [Travis CI](https://travis-ci.org/Shopify/sarama) for testing older Go versions
diff --git a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md b/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index 7ccafb6247..0000000000
--- a/vendor/github.com/Shopify/sarama/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,20 +0,0 @@
-##### Versions
-
-*Please specify real version numbers or git SHAs, not just "Latest" since that changes fairly regularly.*
-Sarama Version:
-Kafka Version:
-Go Version:
-
-##### Configuration
-
-What configuration values are you using for Sarama and Kafka?
-
-##### Logs
-
-When filing an issue please provide logs from Sarama and Kafka if at all
-possible. You can set `sarama.Logger` to a `log.Logger` to capture Sarama debug
-output.
-
-##### Problem Description
-
-
diff --git a/vendor/github.com/Shopify/sarama/examples/README.md b/vendor/github.com/Shopify/sarama/examples/README.md
deleted file mode 100644
index 85fecefd8d..0000000000
--- a/vendor/github.com/Shopify/sarama/examples/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Sarama examples
-
-This folder contains example applications to demonstrate the use of Sarama. For code snippet examples on how to use the different types in Sarama, see [Sarama's API documentation on godoc.org](https://godoc.org/github.com/Shopify/sarama)
-
-In these examples, we use `github.com/Shopify/sarama` as import path. We do this to ensure all the examples are up to date with the latest changes in Sarama. For your own applications, you may want to use `gopkg.in/Shopify/sarama.v1` to lock into a stable API version.
-
-#### HTTP server
-
-[http_server](./http_server) is a simple HTTP server uses both the sync producer to produce data as part of the request handling cycle, as well as the async producer to maintain an access log. It also uses the [mocks subpackage](https://godoc.org/github.com/Shopify/sarama/mocks) to test both.
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore b/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
deleted file mode 100644
index 9f6ed425f9..0000000000
--- a/vendor/github.com/Shopify/sarama/examples/http_server/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-http_server
-http_server.test
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/README.md b/vendor/github.com/Shopify/sarama/examples/http_server/README.md
deleted file mode 100644
index 5ff2bc2533..0000000000
--- a/vendor/github.com/Shopify/sarama/examples/http_server/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# HTTP server example
-
-This HTTP server example shows you how to use the AsyncProducer and SyncProducer, and how to test them using mocks. The server simply sends the data of the HTTP request's query string to Kafka, and send a 200 result if that succeeds. For every request, it will send an access log entry to Kafka as well in the background.
-
-If you need to know whether a message was successfully sent to the Kafka cluster before you can send your HTTP response, using the `SyncProducer` is probably the simplest way to achieve this. If you don't care, e.g. for the access log, using the `AsyncProducer` will let you fire and forget. You can send the HTTP response, while the message is being produced in the background.
-
-One important thing to note is that both the `SyncProducer` and `AsyncProducer` are **thread-safe**. Go's `http.Server` handles requests concurrently in different goroutines, but you can use a single producer safely. This will actually achieve efficiency gains as the producer will be able to batch messages from concurrent requests together.
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
deleted file mode 100644
index b6d83c5dc9..0000000000
--- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package main
-
-import (
-	"github.com/Shopify/sarama"
-
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"net/http"
-	"os"
-	"strings"
-	"time"
-)
-
-var (
-	addr      = flag.String("addr", ":8080", "The address to bind to")
-	brokers   = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The Kafka brokers to connect to, as a comma separated list")
-	verbose   = flag.Bool("verbose", false, "Turn on Sarama logging")
-	certFile  = flag.String("certificate", "", "The optional certificate file for client authentication")
-	keyFile   = flag.String("key", "", "The optional key file for client authentication")
-	caFile    = flag.String("ca", "", "The optional certificate authority file for TLS client authentication")
-	verifySsl = flag.Bool("verify", false, "Optional verify ssl certificates chain")
-)
-
-func main() {
-	flag.Parse()
-
-	if *verbose {
-		sarama.Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags)
-	}
-
-	if *brokers == "" {
-		flag.PrintDefaults()
-		os.Exit(1)
-	}
-
-	brokerList := strings.Split(*brokers, ",")
-	log.Printf("Kafka brokers: %s", strings.Join(brokerList, ", "))
-
-	server := &Server{
-		DataCollector:     newDataCollector(brokerList),
-		AccessLogProducer: newAccessLogProducer(brokerList),
-	}
-	defer func() {
-		if err := server.Close(); err != nil {
-			log.Println("Failed to close server", err)
-		}
-	}()
-
-	log.Fatal(server.Run(*addr))
-}
-
-func createTlsConfiguration() (t *tls.Config) {
-	if *certFile != "" && *keyFile != "" && *caFile != "" {
-		cert, err := tls.LoadX509KeyPair(*certFile, *keyFile)
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		caCert, err := ioutil.ReadFile(*caFile)
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		caCertPool := x509.NewCertPool()
-		caCertPool.AppendCertsFromPEM(caCert)
-
-		t = &tls.Config{
-			Certificates:       []tls.Certificate{cert},
-			RootCAs:            caCertPool,
-			InsecureSkipVerify: *verifySsl,
-		}
-	}
-	// will be nil by default if nothing is provided
-	return t
-}
-
-type Server struct {
-	DataCollector     sarama.SyncProducer
-	AccessLogProducer sarama.AsyncProducer
-}
-
-func (s *Server) Close() error {
-	if err := s.DataCollector.Close(); err != nil {
-		log.Println("Failed to shut down data collector cleanly", err)
-	}
-
-	if err := s.AccessLogProducer.Close(); err != nil {
-		log.Println("Failed to shut down access log producer cleanly", err)
-	}
-
-	return nil
-}
-
-func (s *Server) Handler() http.Handler {
-	return s.withAccessLog(s.collectQueryStringData())
-}
-
-func (s *Server) Run(addr string) error {
-	httpServer := &http.Server{
-		Addr:    addr,
-		Handler: s.Handler(),
-	}
-
-	log.Printf("Listening for requests on %s...\n", addr)
-	return httpServer.ListenAndServe()
-}
-
-func (s *Server) collectQueryStringData() http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path != "/" {
-			http.NotFound(w, r)
-			return
-		}
-
-		// We are not setting a message key, which means that all messages will
-		// be distributed randomly over the different partitions.
-		partition, offset, err := s.DataCollector.SendMessage(&sarama.ProducerMessage{
-			Topic: "important",
-			Value: sarama.StringEncoder(r.URL.RawQuery),
-		})
-
-		if err != nil {
-			w.WriteHeader(http.StatusInternalServerError)
-			fmt.Fprintf(w, "Failed to store your data:, %s", err)
-		} else {
-			// The tuple (topic, partition, offset) can be used as a unique identifier
-			// for a message in a Kafka cluster.
-			fmt.Fprintf(w, "Your data is stored with unique identifier important/%d/%d", partition, offset)
-		}
-	})
-}
-
-type accessLogEntry struct {
-	Method       string  `json:"method"`
-	Host         string  `json:"host"`
-	Path         string  `json:"path"`
-	IP           string  `json:"ip"`
-	ResponseTime float64 `json:"response_time"`
-
-	encoded []byte
-	err     error
-}
-
-func (ale *accessLogEntry) ensureEncoded() {
-	if ale.encoded == nil && ale.err == nil {
-		ale.encoded, ale.err = json.Marshal(ale)
-	}
-}
-
-func (ale *accessLogEntry) Length() int {
-	ale.ensureEncoded()
-	return len(ale.encoded)
-}
-
-func (ale *accessLogEntry) Encode() ([]byte, error) {
-	ale.ensureEncoded()
-	return ale.encoded, ale.err
-}
-
-func (s *Server) withAccessLog(next http.Handler) http.Handler {
-
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		started := time.Now()
-
-		next.ServeHTTP(w, r)
-
-		entry := &accessLogEntry{
-			Method:       r.Method,
-			Host:         r.Host,
-			Path:         r.RequestURI,
-			IP:           r.RemoteAddr,
-			ResponseTime: float64(time.Since(started)) / float64(time.Second),
-		}
-
-		// We will use the client's IP address as key. This will cause
-		// all the access log entries of the same IP address to end up
-		// on the same partition.
-		s.AccessLogProducer.Input() <- &sarama.ProducerMessage{
-			Topic: "access_log",
-			Key:   sarama.StringEncoder(r.RemoteAddr),
-			Value: entry,
-		}
-	})
-}
-
-func newDataCollector(brokerList []string) sarama.SyncProducer {
-
-	// For the data collector, we are looking for strong consistency semantics.
-	// Because we don't change the flush settings, sarama will try to produce messages
-	// as fast as possible to keep latency low.
-	config := sarama.NewConfig()
-	config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message
-	config.Producer.Retry.Max = 10                   // Retry up to 10 times to produce the message
-	config.Producer.Return.Successes = true
-	tlsConfig := createTlsConfiguration()
-	if tlsConfig != nil {
-		config.Net.TLS.Config = tlsConfig
-		config.Net.TLS.Enable = true
-	}
-
-	// On the broker side, you may want to change the following settings to get
-	// stronger consistency guarantees:
-	// - For your broker, set `unclean.leader.election.enable` to false
-	// - For the topic, you could increase `min.insync.replicas`.
-
-	producer, err := sarama.NewSyncProducer(brokerList, config)
-	if err != nil {
-		log.Fatalln("Failed to start Sarama producer:", err)
-	}
-
-	return producer
-}
-
-func newAccessLogProducer(brokerList []string) sarama.AsyncProducer {
-
-	// For the access log, we are looking for AP semantics, with high throughput.
-	// By creating batches of compressed messages, we reduce network I/O at a cost of more latency.
-	config := sarama.NewConfig()
-	tlsConfig := createTlsConfiguration()
-	if tlsConfig != nil {
-		config.Net.TLS.Enable = true
-		config.Net.TLS.Config = tlsConfig
-	}
-	config.Producer.RequiredAcks = sarama.WaitForLocal       // Only wait for the leader to ack
-	config.Producer.Compression = sarama.CompressionSnappy   // Compress messages
-	config.Producer.Flush.Frequency = 500 * time.Millisecond // Flush batches every 500ms
-
-	producer, err := sarama.NewAsyncProducer(brokerList, config)
-	if err != nil {
-		log.Fatalln("Failed to start Sarama producer:", err)
-	}
-
-	// We will just log to STDOUT if we're not able to produce messages.
-	// Note: messages will only be returned here after all retry attempts are exhausted.
-	go func() {
-		for err := range producer.Errors() {
-			log.Println("Failed to write access log entry:", err)
-		}
-	}()
-
-	return producer
-}
diff --git a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go b/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
deleted file mode 100644
index 7b2451e282..0000000000
--- a/vendor/github.com/Shopify/sarama/examples/http_server/http_server_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package main
-
-import (
-	"io"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/Shopify/sarama"
-	"github.com/Shopify/sarama/mocks"
-)
-
-// In normal operation, we expect one access log entry,
-// and one data collector entry. Let's assume both will succeed.
-// We should return a HTTP 200 status.
-func TestCollectSuccessfully(t *testing.T) {
-	dataCollectorMock := mocks.NewSyncProducer(t, nil)
-	dataCollectorMock.ExpectSendMessageAndSucceed()
-
-	accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
-	accessLogProducerMock.ExpectInputAndSucceed()
-
-	// Now, use dependency injection to use the mocks.
-	s := &Server{
-		DataCollector:     dataCollectorMock,
-		AccessLogProducer: accessLogProducerMock,
-	}
-
-	// The Server's Close call is important; it will call Close on
-	// the two mock producers, which will then validate whether all
-	// expectations are resolved.
-	defer safeClose(t, s)
-
-	req, err := http.NewRequest("GET", "http://example.com/?data", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	res := httptest.NewRecorder()
-	s.Handler().ServeHTTP(res, req)
-
-	if res.Code != 200 {
-		t.Errorf("Expected HTTP status 200, found %d", res.Code)
-	}
-
-	if string(res.Body.Bytes()) != "Your data is stored with unique identifier important/0/1" {
-		t.Error("Unexpected response body", res.Body)
-	}
-}
-
-// Now, let's see if we handle the case of not being able to produce
-// to the data collector properly. In this case we should return a 500 status.
-func TestCollectionFailure(t *testing.T) {
-	dataCollectorMock := mocks.NewSyncProducer(t, nil)
-	dataCollectorMock.ExpectSendMessageAndFail(sarama.ErrRequestTimedOut)
-
-	accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
-	accessLogProducerMock.ExpectInputAndSucceed()
-
-	s := &Server{
-		DataCollector:     dataCollectorMock,
-		AccessLogProducer: accessLogProducerMock,
-	}
-	defer safeClose(t, s)
-
-	req, err := http.NewRequest("GET", "http://example.com/?data", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	res := httptest.NewRecorder()
-	s.Handler().ServeHTTP(res, req)
-
-	if res.Code != 500 {
-		t.Errorf("Expected HTTP status 500, found %d", res.Code)
-	}
-}
-
-// We don't expect any data collector calls because the path is wrong,
-// so we are not setting any expectations on the dataCollectorMock. It
-// will still generate an access log entry though.
-func TestWrongPath(t *testing.T) {
-	dataCollectorMock := mocks.NewSyncProducer(t, nil)
-
-	accessLogProducerMock := mocks.NewAsyncProducer(t, nil)
-	accessLogProducerMock.ExpectInputAndSucceed()
-
-	s := &Server{
-		DataCollector:     dataCollectorMock,
-		AccessLogProducer: accessLogProducerMock,
-	}
-	defer safeClose(t, s)
-
-	req, err := http.NewRequest("GET", "http://example.com/wrong?data", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	res := httptest.NewRecorder()
-
-	s.Handler().ServeHTTP(res, req)
-
-	if res.Code != 404 {
-		t.Errorf("Expected HTTP status 404, found %d", res.Code)
-	}
-}
-
-func safeClose(t *testing.T, o io.Closer) {
-	if err := o.Close(); err != nil {
-		t.Error(err)
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/README.md b/vendor/github.com/Shopify/sarama/mocks/README.md
deleted file mode 100644
index 55a6c2e61c..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# sarama/mocks
-
-The `mocks` subpackage includes mock implementations that implement the interfaces of the major sarama types.
-You can use them to test your sarama applications using dependency injection.
-
-The following mock objects are available:
-
-- [Consumer](https://godoc.org/github.com/Shopify/sarama/mocks#Consumer), which will create [PartitionConsumer](https://godoc.org/github.com/Shopify/sarama/mocks#PartitionConsumer) mocks.
-- [AsyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#AsyncProducer)
-- [SyncProducer](https://godoc.org/github.com/Shopify/sarama/mocks#SyncProducer)
-
-The mocks allow you to set expectations on them. When you close the mocks, the expectations will be verified,
-and the results will be reported to the `*testing.T` object you provided when creating the mock.
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer.go b/vendor/github.com/Shopify/sarama/mocks/async_producer.go
deleted file mode 100644
index 24ae5c0d58..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/async_producer.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// AsyncProducer implements sarama's Producer interface for testing purposes.
-// Before you can send messages to it's Input channel, you have to set expectations
-// so it knows how to handle the input; it returns an error if the number of messages
-// received is bigger then the number of expectations set. You can also set a
-// function in each expectation so that the message value is checked by this function
-// and an error is returned if the match fails.
-type AsyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	closed       chan struct{}
-	input        chan *sarama.ProducerMessage
-	successes    chan *sarama.ProducerMessage
-	errors       chan *sarama.ProducerError
-	lastOffset   int64
-}
-
-// NewAsyncProducer instantiates a new Producer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is used to determine whether it
-// should ack successes on the Successes channel.
-func NewAsyncProducer(t ErrorReporter, config *sarama.Config) *AsyncProducer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-	mp := &AsyncProducer{
-		t:            t,
-		closed:       make(chan struct{}, 0),
-		expectations: make([]*producerExpectation, 0),
-		input:        make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		successes:    make(chan *sarama.ProducerMessage, config.ChannelBufferSize),
-		errors:       make(chan *sarama.ProducerError, config.ChannelBufferSize),
-	}
-
-	go func() {
-		defer func() {
-			close(mp.successes)
-			close(mp.errors)
-		}()
-
-		for msg := range mp.input {
-			mp.l.Lock()
-			if mp.expectations == nil || len(mp.expectations) == 0 {
-				mp.expectations = nil
-				mp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
-			} else {
-				expectation := mp.expectations[0]
-				mp.expectations = mp.expectations[1:]
-				if expectation.CheckFunction != nil {
-					if val, err := msg.Value.Encode(); err != nil {
-						mp.t.Errorf("Input message encoding failed: %s", err.Error())
-						mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-					} else {
-						err = expectation.CheckFunction(val)
-						if err != nil {
-							mp.t.Errorf("Check function returned an error: %s", err.Error())
-							mp.errors <- &sarama.ProducerError{Err: err, Msg: msg}
-						}
-					}
-				}
-				if expectation.Result == errProduceSuccess {
-					mp.lastOffset++
-					if config.Producer.Return.Successes {
-						msg.Offset = mp.lastOffset
-						mp.successes <- msg
-					}
-				} else {
-					if config.Producer.Return.Errors {
-						mp.errors <- &sarama.ProducerError{Err: expectation.Result, Msg: msg}
-					}
-				}
-			}
-			mp.l.Unlock()
-		}
-
-		mp.l.Lock()
-		if len(mp.expectations) > 0 {
-			mp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(mp.expectations))
-		}
-		mp.l.Unlock()
-
-		close(mp.closed)
-	}()
-
-	return mp
-}
-
-////////////////////////////////////////////////
-// Implement Producer interface
-////////////////////////////////////////////////
-
-// AsyncClose corresponds with the AsyncClose method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there's any remaining expectations.
-func (mp *AsyncProducer) AsyncClose() {
-	close(mp.input)
-}
-
-// Close corresponds with the Close method of sarama's Producer implementation.
-// By closing a mock producer, you also tell it that no more input will be provided, so it will
-// write an error to the test state if there's any remaining expectations.
-func (mp *AsyncProducer) Close() error {
-	mp.AsyncClose()
-	<-mp.closed
-	return nil
-}
-
-// Input corresponds with the Input method of sarama's Producer implementation.
-// You have to set expectations on the mock producer before writing messages to the Input
-// channel, so it knows how to handle them. If there is no more remaining expectations and
-// a messages is written to the Input channel, the mock producer will write an error to the test
-// state object.
-func (mp *AsyncProducer) Input() chan<- *sarama.ProducerMessage {
-	return mp.input
-}
-
-// Successes corresponds with the Successes method of sarama's Producer implementation.
-func (mp *AsyncProducer) Successes() <-chan *sarama.ProducerMessage {
-	return mp.successes
-}
-
-// Errors corresponds with the Errors method of sarama's Producer implementation.
-func (mp *AsyncProducer) Errors() <-chan *sarama.ProducerError {
-	return mp.errors
-}
-
-////////////////////////////////////////////////
-// Setting expectations
-////////////////////////////////////////////////
-
-// ExpectInputWithCheckerFunctionAndSucceed sets an expectation on the mock producer that a message
-// will be provided on the input channel. The mock producer will call the given function to check
-// the message value. If an error is returned it will be made available on the Errors channel
-// otherwise the mock will handle the message as if it produced successfully, i.e. it will make
-// it available on the Successes channel if the Producer.Return.Successes setting is set to true.
-func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndSucceed(cf ValueChecker) {
-	mp.l.Lock()
-	defer mp.l.Unlock()
-	mp.expectations = append(mp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
-}
-
-// ExpectInputWithCheckerFunctionAndFail sets an expectation on the mock producer that a message
-// will be provided on the input channel. The mock producer will first call the given function to
-// check the message value. If an error is returned it will be made available on the Errors channel
-// otherwise the mock will handle the message as if it failed to produce successfully. This means
-// it will make a ProducerError available on the Errors channel.
-func (mp *AsyncProducer) ExpectInputWithCheckerFunctionAndFail(cf ValueChecker, err error) {
-	mp.l.Lock()
-	defer mp.l.Unlock()
-	mp.expectations = append(mp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
-}
-
-// ExpectInputAndSucceed sets an expectation on the mock producer that a message will be provided
-// on the input channel. The mock producer will handle the message as if it is produced successfully,
-// i.e. it will make it available on the Successes channel if the Producer.Return.Successes setting
-// is set to true.
-func (mp *AsyncProducer) ExpectInputAndSucceed() {
-	mp.ExpectInputWithCheckerFunctionAndSucceed(nil)
-}
-
-// ExpectInputAndFail sets an expectation on the mock producer that a message will be provided
-// on the input channel. The mock producer will handle the message as if it failed to produce
-// successfully. This means it will make a ProducerError available on the Errors channel.
-func (mp *AsyncProducer) ExpectInputAndFail(err error) {
-	mp.ExpectInputWithCheckerFunctionAndFail(nil, err)
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
deleted file mode 100644
index b5d92aad85..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/async_producer_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package mocks
-
-import (
-	"errors"
-	"fmt"
-	"regexp"
-	"strings"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func generateRegexpChecker(re string) func([]byte) error {
-	return func(val []byte) error {
-		matched, err := regexp.MatchString(re, string(val))
-		if err != nil {
-			return errors.New("Error while trying to match the input message with the expected pattern: " + err.Error())
-		}
-		if !matched {
-			return fmt.Errorf("No match between input value \"%s\" and expected pattern \"%s\"", val, re)
-		}
-		return nil
-	}
-}
-
-type testReporterMock struct {
-	errors []string
-}
-
-func newTestReporterMock() *testReporterMock {
-	return &testReporterMock{errors: make([]string, 0)}
-}
-
-func (trm *testReporterMock) Errorf(format string, args ...interface{}) {
-	trm.errors = append(trm.errors, fmt.Sprintf(format, args...))
-}
-
-func TestMockAsyncProducerImplementsAsyncProducerInterface(t *testing.T) {
-	var mp interface{} = &AsyncProducer{}
-	if _, ok := mp.(sarama.AsyncProducer); !ok {
-		t.Error("The mock producer should implement the sarama.Producer interface.")
-	}
-}
-
-func TestProducerReturnsExpectationsToChannels(t *testing.T) {
-	config := sarama.NewConfig()
-	config.Producer.Return.Successes = true
-	mp := NewAsyncProducer(t, config)
-
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 1"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 2"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test 3"}
-
-	msg1 := <-mp.Successes()
-	msg2 := <-mp.Successes()
-	err1 := <-mp.Errors()
-
-	if msg1.Topic != "test 1" {
-		t.Error("Expected message 1 to be returned first")
-	}
-
-	if msg2.Topic != "test 2" {
-		t.Error("Expected message 2 to be returned second")
-	}
-
-	if err1.Msg.Topic != "test 3" || err1.Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected message 3 to be returned as error")
-	}
-
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestProducerWithTooFewExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputAndSucceed()
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestProducerWithTooManyExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputAndSucceed()
-	mp.ExpectInputAndFail(sarama.ErrOutOfBrokers)
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test"}
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestProducerWithCheckerFunction(t *testing.T) {
-	trm := newTestReporterMock()
-	mp := NewAsyncProducer(trm, nil)
-	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	mp.ExpectInputWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
-
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	mp.Input() <- &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if err := mp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(mp.Errors()) != 1 {
-		t.Error("Expected to report an error")
-	}
-
-	err1 := <-mp.Errors()
-	if !strings.HasPrefix(err1.Err.Error(), "No match") {
-		t.Error("Expected to report a value check error, found: ", err1.Err)
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer.go b/vendor/github.com/Shopify/sarama/mocks/consumer.go
deleted file mode 100644
index 003d4d3e28..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/consumer.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package mocks
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"github.com/Shopify/sarama"
-)
-
-// Consumer implements sarama's Consumer interface for testing purposes.
-// Before you can start consuming from this consumer, you have to register
-// topic/partitions using ExpectConsumePartition, and set expectations on them.
-type Consumer struct {
-	l                  sync.Mutex
-	t                  ErrorReporter
-	config             *sarama.Config
-	partitionConsumers map[string]map[int32]*PartitionConsumer
-	metadata           map[string][]int32
-}
-
-// NewConsumer returns a new mock Consumer instance. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument can be set to nil.
-func NewConsumer(t ErrorReporter, config *sarama.Config) *Consumer {
-	if config == nil {
-		config = sarama.NewConfig()
-	}
-
-	c := &Consumer{
-		t:                  t,
-		config:             config,
-		partitionConsumers: make(map[string]map[int32]*PartitionConsumer),
-	}
-	return c
-}
-
-///////////////////////////////////////////////////
-// Consumer interface implementation
-///////////////////////////////////////////////////
-
-// ConsumePartition implements the ConsumePartition method from the sarama.Consumer interface.
-// Before you can start consuming a partition, you have to set expectations on it using
-// ExpectConsumePartition. You can only consume a partition once per consumer.
-func (c *Consumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil || c.partitionConsumers[topic][partition] == nil {
-		c.t.Errorf("No expectations set for %s/%d", topic, partition)
-		return nil, errOutOfExpectations
-	}
-
-	pc := c.partitionConsumers[topic][partition]
-	if pc.consumed {
-		return nil, sarama.ConfigurationError("The topic/partition is already being consumed")
-	}
-
-	if pc.offset != AnyOffset && pc.offset != offset {
-		c.t.Errorf("Unexpected offset when calling ConsumePartition for %s/%d. Expected %d, got %d.", topic, partition, pc.offset, offset)
-	}
-
-	pc.consumed = true
-	return pc, nil
-}
-
-// Topics returns a list of topics, as registered with SetMetadata
-func (c *Consumer) Topics() ([]string, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Topics. Initialize the mock's topic metadata with SetMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-
-	var result []string
-	for topic := range c.metadata {
-		result = append(result, topic)
-	}
-	return result, nil
-}
-
-// Partitions returns the list of parititons for the given topic, as registered with SetMetadata
-func (c *Consumer) Partitions(topic string) ([]int32, error) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.metadata == nil {
-		c.t.Errorf("Unexpected call to Partitions. Initialize the mock's topic metadata with SetMetadata.")
-		return nil, sarama.ErrOutOfBrokers
-	}
-	if c.metadata[topic] == nil {
-		return nil, sarama.ErrUnknownTopicOrPartition
-	}
-
-	return c.metadata[topic], nil
-}
-
-func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	hwms := make(map[string]map[int32]int64, len(c.partitionConsumers))
-	for topic, partitionConsumers := range c.partitionConsumers {
-		hwm := make(map[int32]int64, len(partitionConsumers))
-		for partition, pc := range partitionConsumers {
-			hwm[partition] = pc.HighWaterMarkOffset()
-		}
-		hwms[topic] = hwm
-	}
-
-	return hwms
-}
-
-// Close implements the Close method from the sarama.Consumer interface. It will close
-// all registered PartitionConsumer instances.
-func (c *Consumer) Close() error {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	for _, partitions := range c.partitionConsumers {
-		for _, partitionConsumer := range partitions {
-			_ = partitionConsumer.Close()
-		}
-	}
-
-	return nil
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// SetTopicMetadata sets the clusters topic/partition metadata,
-// which will be returned by Topics() and Partitions().
-func (c *Consumer) SetTopicMetadata(metadata map[string][]int32) {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	c.metadata = metadata
-}
-
-// ExpectConsumePartition will register a topic/partition, so you can set expectations on it.
-// The registered PartitionConsumer will be returned, so you can set expectations
-// on it using method chaining. Once a topic/partition is registered, you are
-// expected to start consuming it using ConsumePartition. If that doesn't happen,
-// an error will be written to the error reporter once the mock consumer is closed. It will
-// also expect that the
-func (c *Consumer) ExpectConsumePartition(topic string, partition int32, offset int64) *PartitionConsumer {
-	c.l.Lock()
-	defer c.l.Unlock()
-
-	if c.partitionConsumers[topic] == nil {
-		c.partitionConsumers[topic] = make(map[int32]*PartitionConsumer)
-	}
-
-	if c.partitionConsumers[topic][partition] == nil {
-		c.partitionConsumers[topic][partition] = &PartitionConsumer{
-			t:         c.t,
-			topic:     topic,
-			partition: partition,
-			offset:    offset,
-			messages:  make(chan *sarama.ConsumerMessage, c.config.ChannelBufferSize),
-			errors:    make(chan *sarama.ConsumerError, c.config.ChannelBufferSize),
-		}
-	}
-
-	return c.partitionConsumers[topic][partition]
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer mock type
-///////////////////////////////////////////////////
-
-// PartitionConsumer implements sarama's PartitionConsumer interface for testing purposes.
-// It is returned by the mock Consumers ConsumePartitionMethod, but only if it is
-// registered first using the Consumer's ExpectConsumePartition method. Before consuming the
-// Errors and Messages channel, you should specify what values will be provided on these
-// channels using YieldMessage and YieldError.
-type PartitionConsumer struct {
-	highWaterMarkOffset     int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	l                       sync.Mutex
-	t                       ErrorReporter
-	topic                   string
-	partition               int32
-	offset                  int64
-	messages                chan *sarama.ConsumerMessage
-	errors                  chan *sarama.ConsumerError
-	singleClose             sync.Once
-	consumed                bool
-	errorsShouldBeDrained   bool
-	messagesShouldBeDrained bool
-}
-
-///////////////////////////////////////////////////
-// PartitionConsumer interface implementation
-///////////////////////////////////////////////////
-
-// AsyncClose implements the AsyncClose method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) AsyncClose() {
-	pc.singleClose.Do(func() {
-		close(pc.messages)
-		close(pc.errors)
-	})
-}
-
-// Close implements the Close method from the sarama.PartitionConsumer interface. It will
-// verify whether the partition consumer was actually started.
-func (pc *PartitionConsumer) Close() error {
-	if !pc.consumed {
-		pc.t.Errorf("Expectations set on %s/%d, but no partition consumer was started.", pc.topic, pc.partition)
-		return errPartitionConsumerNotStarted
-	}
-
-	if pc.errorsShouldBeDrained && len(pc.errors) > 0 {
-		pc.t.Errorf("Expected the errors channel for %s/%d to be drained on close, but found %d errors.", pc.topic, pc.partition, len(pc.errors))
-	}
-
-	if pc.messagesShouldBeDrained && len(pc.messages) > 0 {
-		pc.t.Errorf("Expected the messages channel for %s/%d to be drained on close, but found %d messages.", pc.topic, pc.partition, len(pc.messages))
-	}
-
-	pc.AsyncClose()
-
-	var (
-		closeErr error
-		wg       sync.WaitGroup
-	)
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-
-		var errs = make(sarama.ConsumerErrors, 0)
-		for err := range pc.errors {
-			errs = append(errs, err)
-		}
-
-		if len(errs) > 0 {
-			closeErr = errs
-		}
-	}()
-
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		for range pc.messages {
-			// drain
-		}
-	}()
-
-	wg.Wait()
-	return closeErr
-}
-
-// Errors implements the Errors method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Errors() <-chan *sarama.ConsumerError {
-	return pc.errors
-}
-
-// Messages implements the Messages method from the sarama.PartitionConsumer interface.
-func (pc *PartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
-	return pc.messages
-}
-
-func (pc *PartitionConsumer) HighWaterMarkOffset() int64 {
-	return atomic.LoadInt64(&pc.highWaterMarkOffset) + 1
-}
-
-///////////////////////////////////////////////////
-// Expectation API
-///////////////////////////////////////////////////
-
-// YieldMessage will yield a messages Messages channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this
-// message was consumed from the Messages channel, because there are legitimate
-// reasons forthis not to happen. ou can call ExpectMessagesDrainedOnClose so it will
-// verify that the channel is empty on close.
-func (pc *PartitionConsumer) YieldMessage(msg *sarama.ConsumerMessage) {
-	pc.l.Lock()
-	defer pc.l.Unlock()
-
-	msg.Topic = pc.topic
-	msg.Partition = pc.partition
-	msg.Offset = atomic.AddInt64(&pc.highWaterMarkOffset, 1)
-
-	pc.messages <- msg
-}
-
-// YieldError will yield an error on the Errors channel of this partition consumer
-// when it is consumed. By default, the mock consumer will not verify whether this error was
-// consumed from the Errors channel, because there are legitimate reasons for this
-// not to happen. You can call ExpectErrorsDrainedOnClose so it will verify that
-// the channel is empty on close.
-func (pc *PartitionConsumer) YieldError(err error) {
-	pc.errors <- &sarama.ConsumerError{
-		Topic:     pc.topic,
-		Partition: pc.partition,
-		Err:       err,
-	}
-}
-
-// ExpectMessagesDrainedOnClose sets an expectation on the partition consumer
-// that the messages channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectMessagesDrainedOnClose() {
-	pc.messagesShouldBeDrained = true
-}
-
-// ExpectErrorsDrainedOnClose sets an expectation on the partition consumer
-// that the errors channel will be fully drained when Close is called. If this
-// expectation is not met, an error is reported to the error reporter.
-func (pc *PartitionConsumer) ExpectErrorsDrainedOnClose() {
-	pc.errorsShouldBeDrained = true
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go b/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
deleted file mode 100644
index 311cfa0264..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/consumer_test.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package mocks
-
-import (
-	"sort"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func TestMockConsumerImplementsConsumerInterface(t *testing.T) {
-	var c interface{} = &Consumer{}
-	if _, ok := c.(sarama.Consumer); !ok {
-		t.Error("The mock consumer should implement the sarama.Consumer interface.")
-	}
-
-	var pc interface{} = &PartitionConsumer{}
-	if _, ok := pc.(sarama.PartitionConsumer); !ok {
-		t.Error("The mock partitionconsumer should implement the sarama.PartitionConsumer interface.")
-	}
-}
-
-func TestConsumerHandlesExpectations(t *testing.T) {
-	consumer := NewConsumer(t, nil)
-	defer func() {
-		if err := consumer.Close(); err != nil {
-			t.Error(err)
-		}
-	}()
-
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-	consumer.ExpectConsumePartition("test", 1, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world again")})
-	consumer.ExpectConsumePartition("other", 0, AnyOffset).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello other")})
-
-	pc_test0, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	test0_msg := <-pc_test0.Messages()
-	if test0_msg.Topic != "test" || test0_msg.Partition != 0 || string(test0_msg.Value) != "hello world" {
-		t.Error("Message was not as expected:", test0_msg)
-	}
-	test0_err := <-pc_test0.Errors()
-	if test0_err.Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected sarama.ErrOutOfBrokers, found:", test0_err.Err)
-	}
-
-	pc_test1, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	test1_msg := <-pc_test1.Messages()
-	if test1_msg.Topic != "test" || test1_msg.Partition != 1 || string(test1_msg.Value) != "hello world again" {
-		t.Error("Message was not as expected:", test1_msg)
-	}
-
-	pc_other0, err := consumer.ConsumePartition("other", 0, sarama.OffsetNewest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	other0_msg := <-pc_other0.Messages()
-	if other0_msg.Topic != "other" || other0_msg.Partition != 0 || string(other0_msg.Value) != "hello other" {
-		t.Error("Message was not as expected:", other0_msg)
-	}
-}
-
-func TestConsumerReturnsNonconsumedErrorsOnClose(t *testing.T) {
-	consumer := NewConsumer(t, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldError(sarama.ErrOutOfBrokers)
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	select {
-	case <-pc.Messages():
-		t.Error("Did not epxect a message on the messages channel.")
-	case err := <-pc.Errors():
-		if err.Err != sarama.ErrOutOfBrokers {
-			t.Error("Expected sarama.ErrOutOfBrokers, found", err)
-		}
-	}
-
-	errs := pc.Close().(sarama.ConsumerErrors)
-	if len(errs) != 1 && errs[0].Err != sarama.ErrOutOfBrokers {
-		t.Error("Expected Close to return the remaining sarama.ErrOutOfBrokers")
-	}
-}
-
-func TestConsumerWithoutExpectationsOnPartition(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	_, err := consumer.ConsumePartition("test", 1, sarama.OffsetOldest)
-	if err != errOutOfExpectations {
-		t.Error("Expected ConsumePartition to return errOutOfExpectations")
-	}
-
-	if err := consumer.Close(); err != nil {
-		t.Error("No error expected on close, but found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerWithExpectationsOnUnconsumedPartition(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello world")})
-
-	if err := consumer.Close(); err != nil {
-		t.Error("No error expected on close, but found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerWithWrongOffsetExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-
-	_, err := consumer.ConsumePartition("test", 0, sarama.OffsetNewest)
-	if err != nil {
-		t.Error("Did not expect error, found:", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestConsumerViolatesMessagesDrainedExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-	pcmock.YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-	pcmock.ExpectMessagesDrainedOnClose()
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// consume first message, not second one
-	<-pc.Messages()
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
-
-func TestConsumerMeetsErrorsDrainedExpectation(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	pcmock := consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest)
-	pcmock.YieldError(sarama.ErrInvalidMessage)
-	pcmock.YieldError(sarama.ErrInvalidMessage)
-	pcmock.ExpectErrorsDrainedOnClose()
-
-	pc, err := consumer.ConsumePartition("test", 0, sarama.OffsetOldest)
-	if err != nil {
-		t.Error(err)
-	}
-
-	// consume first and second error,
-	<-pc.Errors()
-	<-pc.Errors()
-
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 0 {
-		t.Errorf("Expected no expectation failures to be set on the error reporter.")
-	}
-}
-
-func TestConsumerTopicMetadata(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	consumer.SetTopicMetadata(map[string][]int32{
-		"test1": {0, 1, 2, 3},
-		"test2": {0, 1, 2, 3, 4, 5, 6, 7},
-	})
-
-	topics, err := consumer.Topics()
-	if err != nil {
-		t.Error(t)
-	}
-
-	sortedTopics := sort.StringSlice(topics)
-	sortedTopics.Sort()
-	if len(sortedTopics) != 2 || sortedTopics[0] != "test1" || sortedTopics[1] != "test2" {
-		t.Error("Unexpected topics returned:", sortedTopics)
-	}
-
-	partitions1, err := consumer.Partitions("test1")
-	if err != nil {
-		t.Error(t)
-	}
-
-	if len(partitions1) != 4 {
-		t.Error("Unexpected partitions returned:", len(partitions1))
-	}
-
-	partitions2, err := consumer.Partitions("test2")
-	if err != nil {
-		t.Error(t)
-	}
-
-	if len(partitions2) != 8 {
-		t.Error("Unexpected partitions returned:", len(partitions2))
-	}
-
-	if len(trm.errors) != 0 {
-		t.Errorf("Expected no expectation failures to be set on the error reporter.")
-	}
-}
-
-func TestConsumerUnexpectedTopicMetadata(t *testing.T) {
-	trm := newTestReporterMock()
-	consumer := NewConsumer(trm, nil)
-
-	if _, err := consumer.Topics(); err != sarama.ErrOutOfBrokers {
-		t.Error("Expected sarama.ErrOutOfBrokers, found", err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Errorf("Expected an expectation failure to be set on the error reporter.")
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/mocks.go b/vendor/github.com/Shopify/sarama/mocks/mocks.go
deleted file mode 100644
index 4adb838d99..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/mocks.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
-Package mocks provides mocks that can be used for testing applications
-that use Sarama. The mock types provided by this package implement the
-interfaces Sarama exports, so you can use them for dependency injection
-in your tests.
-
-All mock instances require you to set expectations on them before you
-can use them. These expectations determine how the mock will behave. If an
-expectation is not met, it will make your test fail.
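-
-A typical test sets expectations on a mock, hands the mock to the code
-under test, and finally closes the mock so that any unmet expectations are
-reported. A minimal sketch (the topic name and message value are only
-illustrative):
-
-	consumer := mocks.NewConsumer(t, nil)
-	consumer.ExpectConsumePartition("test", 0, sarama.OffsetOldest).
-		YieldMessage(&sarama.ConsumerMessage{Value: []byte("hello")})
-	// ... run the code under test against the mock consumer ...
-	if err := consumer.Close(); err != nil {
-		t.Error(err)
-	}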
-
-NOTE: this package currently does not fall under the API stability
-guarantee of Sarama as it is still considered experimental.
-*/
-package mocks
-
-import (
-	"errors"
-
-	"github.com/Shopify/sarama"
-)
-
-// ErrorReporter is a simple interface that includes the testing.T methods we use to report
-// expectation violations when using the mock objects.
-type ErrorReporter interface {
-	Errorf(string, ...interface{})
-}
-
-// ValueChecker is a function type to be set in each expectation of the producer mocks
-// to check the value passed.
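-//
-// For example, a checker that rejects empty message values could be written
-// roughly as follows (purely illustrative):
-//
-//	var nonEmpty ValueChecker = func(val []byte) error {
-//		if len(val) == 0 {
-//			return errors.New("message value must not be empty")
-//		}
-//		return nil
-//	}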
-type ValueChecker func(val []byte) error
-
-var (
-	errProduceSuccess              error = nil
-	errOutOfExpectations                 = errors.New("No more expectations set on mock")
-	errPartitionConsumerNotStarted       = errors.New("The partition consumer was never started")
-)
-
-const AnyOffset int64 = -1000
-
-type producerExpectation struct {
-	Result        error
-	CheckFunction ValueChecker
-}
-
-type consumerExpectation struct {
-	Err error
-	Msg *sarama.ConsumerMessage
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
deleted file mode 100644
index 3f4986e2f8..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/sync_producer.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package mocks
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// SyncProducer implements sarama's SyncProducer interface for testing purposes.
-// Before you can use it, you have to set expectations on the mock SyncProducer
-// to tell it how to handle calls to SendMessage, so you can easily test success
-// and failure scenarios.
-type SyncProducer struct {
-	l            sync.Mutex
-	t            ErrorReporter
-	expectations []*producerExpectation
-	lastOffset   int64
-}
-
-// NewSyncProducer instantiates a new SyncProducer mock. The t argument should
-// be the *testing.T instance of your test method. An error will be written to it if
-// an expectation is violated. The config argument is currently unused, but is
-// maintained to be compatible with the async Producer.
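-//
-// A test using the mock might look roughly like this (the expected calls and
-// the code under test are placeholders):
-//
-//	sp := mocks.NewSyncProducer(t, nil)
-//	sp.ExpectSendMessageAndSucceed()
-//	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-//	// ... hand sp to the code under test, which should call SendMessage twice ...
-//	if err := sp.Close(); err != nil {
-//		t.Error(err)
-//	}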
-func NewSyncProducer(t ErrorReporter, config *sarama.Config) *SyncProducer {
-	return &SyncProducer{
-		t:            t,
-		expectations: make([]*producerExpectation, 0),
-	}
-}
-
-////////////////////////////////////////////////
-// Implement SyncProducer interface
-////////////////////////////////////////////////
-
-// SendMessage corresponds with the SendMessage method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessage, so it knows
-// how to handle them. You can set a function in each expectation so that the message value
-// is checked by this function and an error is returned if the check fails.
-// If there are no remaining expectations when SendMessage is called,
-// the mock producer will write an error to the test state object.
-func (sp *SyncProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		expectation := sp.expectations[0]
-		sp.expectations = sp.expectations[1:]
-		if expectation.CheckFunction != nil {
-			val, err := msg.Value.Encode()
-			if err != nil {
-				sp.t.Errorf("Input message encoding failed: %s", err.Error())
-				return -1, -1, err
-			}
-
-			errCheck := expectation.CheckFunction(val)
-			if errCheck != nil {
-				sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
-				return -1, -1, errCheck
-			}
-		}
-		if expectation.Result == errProduceSuccess {
-			sp.lastOffset++
-			msg.Offset = sp.lastOffset
-			return 0, msg.Offset, nil
-		}
-		return -1, -1, expectation.Result
-	}
-	sp.t.Errorf("No more expectation set on this mock producer to handle the input message.")
-	return -1, -1, errOutOfExpectations
-}
-
-// SendMessages corresponds with the SendMessages method of sarama's SyncProducer implementation.
-// You have to set expectations on the mock producer before calling SendMessages, so it knows
-// how to handle them. If there are not enough remaining expectations when SendMessages is called,
-// the mock producer will write an error to the test state object.
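-// Each message in the batch consumes one expectation, so a call along the
-// lines of (msg1 and msg2 are placeholders)
-//
-//	sp.ExpectSendMessageAndSucceed()
-//	sp.ExpectSendMessageAndSucceed()
-//	err := sp.SendMessages([]*sarama.ProducerMessage{msg1, msg2})
-//
-// needs as many expectations as it sends messages.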
-func (sp *SyncProducer) SendMessages(msgs []*sarama.ProducerMessage) error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) >= len(msgs) {
-		expectations := sp.expectations[0:len(msgs)]
-		sp.expectations = sp.expectations[len(msgs):]
-
-		for i, expectation := range expectations {
-			if expectation.CheckFunction != nil {
-				val, err := msgs[i].Value.Encode()
-				if err != nil {
-					sp.t.Errorf("Input message encoding failed: %s", err.Error())
-					return err
-				}
-				errCheck := expectation.CheckFunction(val)
-				if errCheck != nil {
-					sp.t.Errorf("Check function returned an error: %s", errCheck.Error())
-					return errCheck
-				}
-			}
-			if expectation.Result != errProduceSuccess {
-				return expectation.Result
-			}
-		}
-		return nil
-	}
-	sp.t.Errorf("Insufficient expectations set on this mock producer to handle the input messages.")
-	return errOutOfExpectations
-}
-
-// Close corresponds with the Close method of sarama's SyncProducer implementation.
-// By closing a mock SyncProducer, you also tell it that no more SendMessage calls will follow,
-// so it will write an error to the test state if there are any remaining expectations.
-func (sp *SyncProducer) Close() error {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-
-	if len(sp.expectations) > 0 {
-		sp.t.Errorf("Expected to exhaust all expectations, but %d are left.", len(sp.expectations))
-	}
-
-	return nil
-}
-
-////////////////////////////////////////////////
-// Setting expectations
-////////////////////////////////////////////////
-
-// ExpectSendMessageWithCheckerFunctionAndSucceed sets an expectation on the mock producer that SendMessage
-// will be called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it produced
-// successfully, i.e. by returning a valid partition and offset, and a nil error.
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndSucceed(cf ValueChecker) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-	sp.expectations = append(sp.expectations, &producerExpectation{Result: errProduceSuccess, CheckFunction: cf})
-}
-
-// ExpectSendMessageWithCheckerFunctionAndFail sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will first call the given function to check the message value.
-// It will cascade the error of the function, if any, or handle the message as if it failed
-// to produce successfully, i.e. by returning the provided error.
-func (sp *SyncProducer) ExpectSendMessageWithCheckerFunctionAndFail(cf ValueChecker, err error) {
-	sp.l.Lock()
-	defer sp.l.Unlock()
-	sp.expectations = append(sp.expectations, &producerExpectation{Result: err, CheckFunction: cf})
-}
-
-// ExpectSendMessageAndSucceed sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will handle the message as if it produced successfully, i.e. by
-// returning a valid partition and offset, and a nil error.
-func (sp *SyncProducer) ExpectSendMessageAndSucceed() {
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(nil)
-}
-
-// ExpectSendMessageAndFail sets an expectation on the mock producer that SendMessage will be
-// called. The mock producer will handle the message as if it failed to produce
-// successfully, i.e. by returning the provided error.
-func (sp *SyncProducer) ExpectSendMessageAndFail(err error) {
-	sp.ExpectSendMessageWithCheckerFunctionAndFail(nil, err)
-}
diff --git a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go b/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
deleted file mode 100644
index bf2c71a195..0000000000
--- a/vendor/github.com/Shopify/sarama/mocks/sync_producer_test.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package mocks
-
-import (
-	"errors"
-	"strings"
-	"testing"
-
-	"github.com/Shopify/sarama"
-)
-
-func TestMockSyncProducerImplementsSyncProducerInterface(t *testing.T) {
-	var mp interface{} = &SyncProducer{}
-	if _, ok := mp.(sarama.SyncProducer); !ok {
-		t.Error("The mock async producer should implement the sarama.SyncProducer interface.")
-	}
-}
-
-func TestSyncProducerReturnsExpectationsToSendMessage(t *testing.T) {
-	sp := NewSyncProducer(t, nil)
-	defer func() {
-		if err := sp.Close(); err != nil {
-			t.Error(err)
-		}
-	}()
-
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-
-	_, offset, err := sp.SendMessage(msg)
-	if err != nil {
-		t.Errorf("The first message should have been produced successfully, but got %s", err)
-	}
-	if offset != 1 || offset != msg.Offset {
-		t.Errorf("The first message should have been assigned offset 1, but got %d", msg.Offset)
-	}
-
-	_, offset, err = sp.SendMessage(msg)
-	if err != nil {
-		t.Errorf("The second message should have been produced successfully, but got %s", err)
-	}
-	if offset != 2 || offset != msg.Offset {
-		t.Errorf("The second message should have been assigned offset 2, but got %d", offset)
-	}
-
-	_, _, err = sp.SendMessage(msg)
-	if err != sarama.ErrOutOfBrokers {
-		t.Errorf("The third message should not have been produced successfully")
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-}
-
-func TestSyncProducerWithTooManyExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageAndSucceed()
-	sp.ExpectSendMessageAndFail(sarama.ErrOutOfBrokers)
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithTooFewExpectations(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageAndSucceed()
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call", err)
-	}
-	if _, _, err := sp.SendMessage(msg); err != errOutOfExpectations {
-		t.Error("errOutOfExpectations expected on second SendMessage call, found:", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithCheckerFunction(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
-
-	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err != nil {
-		t.Error("No error expected on first SendMessage call, found: ", err)
-	}
-	msg = &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	if _, _, err := sp.SendMessage(msg); err == nil || !strings.HasPrefix(err.Error(), "No match") {
-		t.Error("Error during value check expected on second SendMessage call, found:", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithCheckerFunctionForSendMessagesWithError(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes$"))
-
-	msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	msg2 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	msgs := []*sarama.ProducerMessage{msg1, msg2}
-
-	if err := sp.SendMessages(msgs); err == nil || !strings.HasPrefix(err.Error(), "No match") {
-		t.Error("Error during value check expected on second message, found: ", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report an error")
-	}
-}
-
-func TestSyncProducerWithCheckerFunctionForSendMessagesWithoutError(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-
-	msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	msgs := []*sarama.ProducerMessage{msg1}
-
-	if err := sp.SendMessages(msgs); err != nil {
-		t.Error("No error expected on SendMessages call, found: ", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 0 {
-		t.Errorf("Expected to not report any errors, found: %v", trm.errors)
-	}
-}
-
-func TestSyncProducerSendMessagesExpectationsMismatchTooFew(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-
-	msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	msg2 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-
-	msgs := []*sarama.ProducerMessage{msg1, msg2}
-
-	if err := sp.SendMessages(msgs); err == nil {
-		t.Error("Error during value check expected on second message, found: ", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 2 {
-		t.Error("Expected to report 2 errors")
-	}
-}
-
-func TestSyncProducerSendMessagesExpectationsMismatchTooMany(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-
-	msg1 := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("test")}
-	msgs := []*sarama.ProducerMessage{msg1}
-
-	if err := sp.SendMessages(msgs); err != nil {
-		t.Error("No error expected on SendMessages call, found: ", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report 1 errors")
-	}
-}
-
-func TestSyncProducerSendMessagesFaultyEncoder(t *testing.T) {
-	trm := newTestReporterMock()
-
-	sp := NewSyncProducer(trm, nil)
-	sp.ExpectSendMessageWithCheckerFunctionAndSucceed(generateRegexpChecker("^tes"))
-
-	msg1 := &sarama.ProducerMessage{Topic: "test", Value: faultyEncoder("123")}
-	msgs := []*sarama.ProducerMessage{msg1}
-
-	if err := sp.SendMessages(msgs); err == nil || !strings.HasPrefix(err.Error(), "encode error") {
-		t.Error("Encoding error expected, found: ", err)
-	}
-
-	if err := sp.Close(); err != nil {
-		t.Error(err)
-	}
-
-	if len(trm.errors) != 1 {
-		t.Error("Expected to report 1 errors")
-	}
-}
-
-type faultyEncoder []byte
-
-func (f faultyEncoder) Encode() ([]byte, error) {
-	return nil, errors.New("encode error")
-}
-
-func (f faultyEncoder) Length() int {
-	return len(f)
-}
diff --git a/vendor/github.com/Shopify/sarama/tools/README.md b/vendor/github.com/Shopify/sarama/tools/README.md
deleted file mode 100644
index 3464c4ad80..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Sarama tools
-
-This folder contains applications that are useful for exploring your Kafka cluster, or for instrumentation.
-Some of these tools mirror tools that ship with Kafka, but they do not require installing the JVM to function.
-
-- [kafka-console-producer](./kafka-console-producer): a command line tool to produce a single message to your Kafka cluster.
-- [kafka-console-partitionconsumer](./kafka-console-partitionconsumer): (deprecated) a command line tool to consume a single partition of a topic on your Kafka cluster.
-- [kafka-console-consumer](./kafka-console-consumer): a command line tool to consume arbitrary partitions of a topic on your Kafka cluster.
-
-To install all tools, run `go get github.com/Shopify/sarama/tools/...`
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
deleted file mode 100644
index 67da9dfa9f..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-kafka-console-consumer
-kafka-console-consumer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
deleted file mode 100644
index 4e77f0b705..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# kafka-console-consumer
-
-A simple command line tool to consume partitions of a topic and print the
-messages on the standard output.
-
-### Installation
-
-    go get github.com/Shopify/sarama/tools/kafka-console-consumer
-
-### Usage
-
-    # Minimum invocation
-    kafka-console-consumer -topic=test -brokers=kafka1:9092
-
-    # It will pick up a KAFKA_PEERS environment variable
-    export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
-    kafka-console-consumer -topic=test
-
-    # You can specify the offset you want to start at. It can be either
-    # `oldest` or `newest`. The default is `newest`.
-    kafka-console-consumer -topic=test -offset=oldest
-    kafka-console-consumer -topic=test -offset=newest
-
-    # You can specify the partition(s) you want to consume as a comma-separated
-    # list. The default is `all`.
-    kafka-console-consumer -topic=test -partitions=1,2,3
-
-    # Display all command line options
-    kafka-console-consumer -help
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
deleted file mode 100644
index 0f1eb89a90..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-consumer/kafka-console-consumer.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package main
-
-import (
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-	"strconv"
-	"strings"
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-var (
-	brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
-	topic      = flag.String("topic", "", "REQUIRED: the topic to consume")
-	partitions = flag.String("partitions", "all", "The partitions to consume, can be 'all' or comma-separated numbers")
-	offset     = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`")
-	verbose    = flag.Bool("verbose", false, "Whether to turn on sarama logging")
-	bufferSize = flag.Int("buffer-size", 256, "The buffer size of the message channel.")
-
-	logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-func main() {
-	flag.Parse()
-
-	if *brokerList == "" {
-		printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
-	}
-
-	if *topic == "" {
-		printUsageErrorAndExit("-topic is required")
-	}
-
-	if *verbose {
-		sarama.Logger = logger
-	}
-
-	var initialOffset int64
-	switch *offset {
-	case "oldest":
-		initialOffset = sarama.OffsetOldest
-	case "newest":
-		initialOffset = sarama.OffsetNewest
-	default:
-		printUsageErrorAndExit("-offset should be `oldest` or `newest`")
-	}
-
-	c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
-	if err != nil {
-		printErrorAndExit(69, "Failed to start consumer: %s", err)
-	}
-
-	partitionList, err := getPartitions(c)
-	if err != nil {
-		printErrorAndExit(69, "Failed to get the list of partitions: %s", err)
-	}
-
-	var (
-		messages = make(chan *sarama.ConsumerMessage, *bufferSize)
-		closing  = make(chan struct{})
-		wg       sync.WaitGroup
-	)
-
-	go func() {
-		signals := make(chan os.Signal, 1)
-		signal.Notify(signals, os.Kill, os.Interrupt)
-		<-signals
-		logger.Println("Initiating shutdown of consumer...")
-		close(closing)
-	}()
-
-	for _, partition := range partitionList {
-		pc, err := c.ConsumePartition(*topic, partition, initialOffset)
-		if err != nil {
-			printErrorAndExit(69, "Failed to start consumer for partition %d: %s", partition, err)
-		}
-
-		go func(pc sarama.PartitionConsumer) {
-			<-closing
-			pc.AsyncClose()
-		}(pc)
-
-		wg.Add(1)
-		go func(pc sarama.PartitionConsumer) {
-			defer wg.Done()
-			for message := range pc.Messages() {
-				messages <- message
-			}
-		}(pc)
-	}
-
-	go func() {
-		for msg := range messages {
-			fmt.Printf("Partition:\t%d\n", msg.Partition)
-			fmt.Printf("Offset:\t%d\n", msg.Offset)
-			fmt.Printf("Key:\t%s\n", string(msg.Key))
-			fmt.Printf("Value:\t%s\n", string(msg.Value))
-			fmt.Println()
-		}
-	}()
-
-	wg.Wait()
-	logger.Println("Done consuming topic", *topic)
-	close(messages)
-
-	if err := c.Close(); err != nil {
-		logger.Println("Failed to close consumer: ", err)
-	}
-}
-
-func getPartitions(c sarama.Consumer) ([]int32, error) {
-	if *partitions == "all" {
-		return c.Partitions(*topic)
-	}
-
-	tmp := strings.Split(*partitions, ",")
-	var pList []int32
-	for i := range tmp {
-		val, err := strconv.ParseInt(tmp[i], 10, 32)
-		if err != nil {
-			return nil, err
-		}
-		pList = append(pList, int32(val))
-	}
-
-	return pList, nil
-}
-
-func printErrorAndExit(code int, format string, values ...interface{}) {
-	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
-	fmt.Fprintln(os.Stderr)
-	os.Exit(code)
-}
-
-func printUsageErrorAndExit(format string, values ...interface{}) {
-	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
-	fmt.Fprintln(os.Stderr)
-	fmt.Fprintln(os.Stderr, "Available command line options:")
-	flag.PrintDefaults()
-	os.Exit(64)
-}
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
deleted file mode 100644
index 5837fe8ca6..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-kafka-console-partitionconsumer
-kafka-console-partitionconsumer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
deleted file mode 100644
index 646dd5f5c2..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# kafka-console-partitionconsumer
-
-NOTE: this tool is deprecated in favour of the more general and more powerful
-`kafka-console-consumer`.
-
-A simple command line tool to consume a partition of a topic and print the messages
-on the standard output.
-
-### Installation
-
-    go get github.com/Shopify/sarama/tools/kafka-console-partitionconsumer
-
-### Usage
-
-    # Minimum invocation
-    kafka-console-partitionconsumer -topic=test -partition=4 -brokers=kafka1:9092
-
-    # It will pick up a KAFKA_PEERS environment variable
-    export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
-    kafka-console-partitionconsumer -topic=test -partition=4
-
-    # You can specify the offset you want to start at. It can be either
-    # `oldest`, `newest`, or a specific offset number
-    kafka-console-partitionconsumer -topic=test -partition=3 -offset=oldest
-    kafka-console-partitionconsumer -topic=test -partition=2 -offset=1337
-
-    # Display all command line options
-    kafka-console-partitionconsumer -help
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
deleted file mode 100644
index d5e4464de1..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-partitionconsumer/kafka-console-partitionconsumer.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package main
-
-import (
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-	"strconv"
-	"strings"
-
-	"github.com/Shopify/sarama"
-)
-
-var (
-	brokerList = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster")
-	topic      = flag.String("topic", "", "REQUIRED: the topic to consume")
-	partition  = flag.Int("partition", -1, "REQUIRED: the partition to consume")
-	offset     = flag.String("offset", "newest", "The offset to start with. Can be `oldest`, `newest`, or an actual offset")
-	verbose    = flag.Bool("verbose", false, "Whether to turn on sarama logging")
-
-	logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-func main() {
-	flag.Parse()
-
-	if *brokerList == "" {
-		printUsageErrorAndExit("You have to provide -brokers as a comma-separated list, or set the KAFKA_PEERS environment variable.")
-	}
-
-	if *topic == "" {
-		printUsageErrorAndExit("-topic is required")
-	}
-
-	if *partition == -1 {
-		printUsageErrorAndExit("-partition is required")
-	}
-
-	if *verbose {
-		sarama.Logger = logger
-	}
-
-	var (
-		initialOffset int64
-		offsetError   error
-	)
-	switch *offset {
-	case "oldest":
-		initialOffset = sarama.OffsetOldest
-	case "newest":
-		initialOffset = sarama.OffsetNewest
-	default:
-		initialOffset, offsetError = strconv.ParseInt(*offset, 10, 64)
-	}
-
-	if offsetError != nil {
-		printUsageErrorAndExit("Invalid initial offset: %s", *offset)
-	}
-
-	c, err := sarama.NewConsumer(strings.Split(*brokerList, ","), nil)
-	if err != nil {
-		printErrorAndExit(69, "Failed to start consumer: %s", err)
-	}
-
-	pc, err := c.ConsumePartition(*topic, int32(*partition), initialOffset)
-	if err != nil {
-		printErrorAndExit(69, "Failed to start partition consumer: %s", err)
-	}
-
-	go func() {
-		signals := make(chan os.Signal, 1)
-		signal.Notify(signals, os.Kill, os.Interrupt)
-		<-signals
-		pc.AsyncClose()
-	}()
-
-	for msg := range pc.Messages() {
-		fmt.Printf("Offset:\t%d\n", msg.Offset)
-		fmt.Printf("Key:\t%s\n", string(msg.Key))
-		fmt.Printf("Value:\t%s\n", string(msg.Value))
-		fmt.Println()
-	}
-
-	if err := c.Close(); err != nil {
-		logger.Println("Failed to close consumer: ", err)
-	}
-}
-
-func printErrorAndExit(code int, format string, values ...interface{}) {
-	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
-	fmt.Fprintln(os.Stderr)
-	os.Exit(code)
-}
-
-func printUsageErrorAndExit(format string, values ...interface{}) {
-	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
-	fmt.Fprintln(os.Stderr)
-	fmt.Fprintln(os.Stderr, "Available command line options:")
-	flag.PrintDefaults()
-	os.Exit(64)
-}
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
deleted file mode 100644
index 2b9e563a1c..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-kafka-console-producer
-kafka-console-producer.test
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
deleted file mode 100644
index 6b3a65f211..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# kafka-console-producer
-
-A simple command line tool to produce a single message to Kafka.
-
-### Installation
-
-    go get github.com/Shopify/sarama/tools/kafka-console-producer
-
-
-### Usage
-
-    # Minimum invocation
-    kafka-console-producer -topic=test -value=value -brokers=kafka1:9092
-
-    # It will pick up a KAFKA_PEERS environment variable
-    export KAFKA_PEERS=kafka1:9092,kafka2:9092,kafka3:9092
-    kafka-console-producer -topic=test -value=value
-
-    # It will read the value from stdin by using pipes
-    echo "hello world" | kafka-console-producer -topic=test
-
-    # Specify a key:
-    echo "hello world" | kafka-console-producer -topic=test -key=key
-
-    # Partitioning: by default, kafka-console-producer will partition as follows:
-    # - manual partitioning if a -partition is provided
-    # - hash partitioning by key if a -key is provided
-    # - random partitioning otherwise.
-    #
-    # You can override this using the -partitioner argument:
-    echo "hello world" | kafka-console-producer -topic=test -key=key -partitioner=random
-
-    # Display all command line options
-    kafka-console-producer -help
diff --git a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go b/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
deleted file mode 100644
index 83054ed78a..0000000000
--- a/vendor/github.com/Shopify/sarama/tools/kafka-console-producer/kafka-console-producer.go
+++ /dev/null
@@ -1,124 +0,0 @@
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"strings"
-
-	"github.com/Shopify/sarama"
-	"github.com/rcrowley/go-metrics"
-)
-
-var (
-	brokerList  = flag.String("brokers", os.Getenv("KAFKA_PEERS"), "The comma separated list of brokers in the Kafka cluster. You can also set the KAFKA_PEERS environment variable")
-	topic       = flag.String("topic", "", "REQUIRED: the topic to produce to")
-	key         = flag.String("key", "", "The key of the message to produce. Can be empty.")
-	value       = flag.String("value", "", "REQUIRED: the value of the message to produce. You can also provide the value on stdin.")
-	partitioner = flag.String("partitioner", "", "The partitioning scheme to use. Can be `hash`, `manual`, or `random`")
-	partition   = flag.Int("partition", -1, "The partition to produce to.")
-	verbose     = flag.Bool("verbose", false, "Turn on sarama logging to stderr")
-	showMetrics = flag.Bool("metrics", false, "Output metrics on successful publish to stderr")
-	silent      = flag.Bool("silent", false, "Turn off printing the message's topic, partition, and offset to stdout")
-
-	logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-func main() {
-	flag.Parse()
-
-	if *brokerList == "" {
-		printUsageErrorAndExit("no -brokers specified. Alternatively, set the KAFKA_PEERS environment variable")
-	}
-
-	if *topic == "" {
-		printUsageErrorAndExit("no -topic specified")
-	}
-
-	if *verbose {
-		sarama.Logger = logger
-	}
-
-	config := sarama.NewConfig()
-	config.Producer.RequiredAcks = sarama.WaitForAll
-	config.Producer.Return.Successes = true
-
-	switch *partitioner {
-	case "":
-		if *partition >= 0 {
-			config.Producer.Partitioner = sarama.NewManualPartitioner
-		} else {
-			config.Producer.Partitioner = sarama.NewHashPartitioner
-		}
-	case "hash":
-		config.Producer.Partitioner = sarama.NewHashPartitioner
-	case "random":
-		config.Producer.Partitioner = sarama.NewRandomPartitioner
-	case "manual":
-		config.Producer.Partitioner = sarama.NewManualPartitioner
-		if *partition == -1 {
-			printUsageErrorAndExit("-partition is required when partitioning manually")
-		}
-	default:
-		printUsageErrorAndExit(fmt.Sprintf("Partitioner %s not supported.", *partitioner))
-	}
-
-	message := &sarama.ProducerMessage{Topic: *topic, Partition: int32(*partition)}
-
-	if *key != "" {
-		message.Key = sarama.StringEncoder(*key)
-	}
-
-	if *value != "" {
-		message.Value = sarama.StringEncoder(*value)
-	} else if stdinAvailable() {
-		bytes, err := ioutil.ReadAll(os.Stdin)
-		if err != nil {
-			printErrorAndExit(66, "Failed to read data from the standard input: %s", err)
-		}
-		message.Value = sarama.ByteEncoder(bytes)
-	} else {
-		printUsageErrorAndExit("-value is required, or you have to provide the value on stdin")
-	}
-
-	producer, err := sarama.NewSyncProducer(strings.Split(*brokerList, ","), config)
-	if err != nil {
-		printErrorAndExit(69, "Failed to open Kafka producer: %s", err)
-	}
-	defer func() {
-		if err := producer.Close(); err != nil {
-			logger.Println("Failed to close Kafka producer cleanly:", err)
-		}
-	}()
-
-	partition, offset, err := producer.SendMessage(message)
-	if err != nil {
-		printErrorAndExit(69, "Failed to produce message: %s", err)
-	} else if !*silent {
-		fmt.Printf("topic=%s\tpartition=%d\toffset=%d\n", *topic, partition, offset)
-	}
-	if *showMetrics {
-		metrics.WriteOnce(config.MetricRegistry, os.Stderr)
-	}
-}
-
-func printErrorAndExit(code int, format string, values ...interface{}) {
-	fmt.Fprintf(os.Stderr, "ERROR: %s\n", fmt.Sprintf(format, values...))
-	fmt.Fprintln(os.Stderr)
-	os.Exit(code)
-}
-
-func printUsageErrorAndExit(message string) {
-	fmt.Fprintln(os.Stderr, "ERROR:", message)
-	fmt.Fprintln(os.Stderr)
-	fmt.Fprintln(os.Stderr, "Available command line options:")
-	flag.PrintDefaults()
-	os.Exit(64)
-}
-
-func stdinAvailable() bool {
-	stat, _ := os.Stdin.Stat()
-	return (stat.Mode() & os.ModeCharDevice) == 0
-}
diff --git a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh
deleted file mode 100755
index 95e47dde43..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/boot_cluster.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-# Launch and wait for toxiproxy
-${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh &
-while ! nc -q 1 localhost 2181 </dev/null; do echo "Waiting"; sleep 1; done
-while ! nc -q 1 localhost 9092 </dev/null; do echo "Waiting"; sleep 1; done
-
-# Launch and wait for Zookeeper
-for i in 1 2 3 4 5; do
-    KAFKA_PORT=`expr $i + 9090`
-    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
-done
-while ! nc -q 1 localhost 21805 </dev/null; do echo "Waiting"; sleep 1; done
-
-# Launch and wait for Kafka
-for i in 1 2 3 4 5; do
-    KAFKA_PORT=`expr $i + 9090`
-    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-start.sh -daemon config/server.properties
-done
-while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done
diff --git a/vendor/github.com/Shopify/sarama/vagrant/create_topics.sh b/vendor/github.com/Shopify/sarama/vagrant/create_topics.sh
deleted file mode 100755
index 1fe84fbd4d..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/create_topics.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-cd ${KAFKA_INSTALL_ROOT}/kafka-9092
-bin/kafka-topics.sh --create --partitions 1 --replication-factor 3 --topic test.1 --zookeeper localhost:2181
-bin/kafka-topics.sh --create --partitions 4 --replication-factor 3 --topic test.4 --zookeeper localhost:2181
-bin/kafka-topics.sh --create --partitions 64 --replication-factor 3 --topic test.64  --zookeeper localhost:2181
diff --git a/vendor/github.com/Shopify/sarama/vagrant/halt_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/halt_cluster.sh
deleted file mode 100755
index f255bc3c6d..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/halt_cluster.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-for i in 1 2 3 4 5; do
-    KAFKA_PORT=`expr $i + 9090`
-    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/kafka-server-stop.sh
-done
-
-for i in 1 2 3 4 5; do
-    KAFKA_PORT=`expr $i + 9090`
-    cd ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} && bin/zookeeper-server-stop.sh
-done
-
-killall toxiproxy
diff --git a/vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh b/vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh
deleted file mode 100755
index 74f1f1a352..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/install_cluster.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-TOXIPROXY_VERSION=2.0.0
-
-mkdir -p ${KAFKA_INSTALL_ROOT}
-if [ ! -f ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz ]; then
-    wget --quiet http://apache.mirror.gtcomm.net/kafka/${KAFKA_VERSION}/kafka_2.11-${KAFKA_VERSION}.tgz -O ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz
-fi
-if [ ! -f ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ]; then
-    wget --quiet https://github.com/Shopify/toxiproxy/releases/download/v${TOXIPROXY_VERSION}/toxiproxy-server-linux-amd64 -O ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
-    chmod +x ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION}
-fi
-rm -f ${KAFKA_INSTALL_ROOT}/toxiproxy
-ln -s ${KAFKA_INSTALL_ROOT}/toxiproxy-${TOXIPROXY_VERSION} ${KAFKA_INSTALL_ROOT}/toxiproxy
-
-for i in 1 2 3 4 5; do
-    ZK_PORT=`expr $i + 2180`
-    ZK_PORT_REAL=`expr $i + 21800`
-    KAFKA_PORT=`expr $i + 9090`
-    KAFKA_PORT_REAL=`expr $i + 29090`
-
-    # unpack kafka
-    mkdir -p ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}
-    tar xzf ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_VERSION}.tgz -C ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT} --strip-components 1
-
-    # broker configuration
-    cp ${REPOSITORY_ROOT}/vagrant/server.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
-    sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
-    sed -i s/KAFKAPORT/${KAFKA_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
-    sed -i s/KAFKA_HOSTNAME/${KAFKA_HOSTNAME}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
-    sed -i s/ZK_PORT/${ZK_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
-
-    KAFKA_DATADIR="${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/data"
-    mkdir -p ${KAFKA_DATADIR}
-    sed -i s#KAFKA_DATADIR#${KAFKA_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/server.properties
-
-    # zookeeper configuration
-    cp ${REPOSITORY_ROOT}/vagrant/zookeeper.properties ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/
-    sed -i s/KAFKAID/${KAFKA_PORT}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
-    sed -i s/ZK_PORT/${ZK_PORT_REAL}/g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
-
-    ZK_DATADIR="${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}"
-    mkdir -p ${ZK_DATADIR}
-    sed -i s#ZK_DATADIR#${ZK_DATADIR}#g ${KAFKA_INSTALL_ROOT}/kafka-${KAFKA_PORT}/config/zookeeper.properties
-
-    echo $i > ${KAFKA_INSTALL_ROOT}/zookeeper-${ZK_PORT}/myid
-done
diff --git a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf b/vendor/github.com/Shopify/sarama/vagrant/kafka.conf
deleted file mode 100644
index 25101df5a3..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/kafka.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-start on started zookeeper-ZK_PORT
-stop on stopping zookeeper-ZK_PORT
-
-# Use a script instead of exec (using env stanza leaks KAFKA_HEAP_OPTS from zookeeper)
-script
-  sleep 2
-  export KAFKA_HEAP_OPTS="-Xmx320m"
-  exec /opt/kafka-KAFKAID/bin/kafka-server-start.sh /opt/kafka-KAFKAID/config/server.properties
-end script
diff --git a/vendor/github.com/Shopify/sarama/vagrant/provision.sh b/vendor/github.com/Shopify/sarama/vagrant/provision.sh
deleted file mode 100755
index 13a8d56238..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/provision.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-apt-get update
-yes | apt-get install default-jre
-
-export KAFKA_INSTALL_ROOT=/opt
-export KAFKA_HOSTNAME=192.168.100.67
-export KAFKA_VERSION=1.0.0
-export REPOSITORY_ROOT=/vagrant
-
-sh /vagrant/vagrant/install_cluster.sh
-sh /vagrant/vagrant/setup_services.sh
-sh /vagrant/vagrant/create_topics.sh
diff --git a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh b/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
deleted file mode 100755
index e52c00e7b5..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/run_toxiproxy.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-${KAFKA_INSTALL_ROOT}/toxiproxy -port 8474 -host 0.0.0.0 &
-PID=$!
-
-while ! nc -q 1 localhost 8474 </dev/null; do echo "Waiting"; sleep 1; done
-
-wget -O/dev/null -S --post-data='{"name":"zk1", "upstream":"localhost:21801", "listen":"0.0.0.0:2181"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"zk2", "upstream":"localhost:21802", "listen":"0.0.0.0:2182"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"zk3", "upstream":"localhost:21803", "listen":"0.0.0.0:2183"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"zk4", "upstream":"localhost:21804", "listen":"0.0.0.0:2184"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"zk5", "upstream":"localhost:21805", "listen":"0.0.0.0:2185"}' localhost:8474/proxies
-
-wget -O/dev/null -S --post-data='{"name":"kafka1", "upstream":"localhost:29091", "listen":"0.0.0.0:9091"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"kafka2", "upstream":"localhost:29092", "listen":"0.0.0.0:9092"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"kafka3", "upstream":"localhost:29093", "listen":"0.0.0.0:9093"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"kafka4", "upstream":"localhost:29094", "listen":"0.0.0.0:9094"}' localhost:8474/proxies
-wget -O/dev/null -S --post-data='{"name":"kafka5", "upstream":"localhost:29095", "listen":"0.0.0.0:9095"}' localhost:8474/proxies
-
-wait $PID
diff --git a/vendor/github.com/Shopify/sarama/vagrant/server.properties b/vendor/github.com/Shopify/sarama/vagrant/server.properties
deleted file mode 100644
index ca6e604cd8..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/server.properties
+++ /dev/null
@@ -1,127 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# see kafka.server.KafkaConfig for additional details and defaults
-
-############################# Server Basics #############################
-
-# The id of the broker. This must be set to a unique integer for each broker.
-broker.id=KAFKAID
-reserved.broker.max.id=10000
-
-############################# Socket Server Settings #############################
-
-# The port the socket server listens on
-port=KAFKAPORT
-
-# Hostname the broker will bind to. If not set, the server will bind to all interfaces
-host.name=localhost
-
-# Hostname the broker will advertise to producers and consumers. If not set, it uses the
-# value for "host.name" if configured.  Otherwise, it will use the value returned from
-# java.net.InetAddress.getCanonicalHostName().
-advertised.host.name=KAFKA_HOSTNAME
-advertised.port=KAFKAID
-
-# The port to publish to ZooKeeper for clients to use. If this is not set,
-# it will publish the same port that the broker binds to.
-# advertised.port=<port accessible by clients>
-
-# The number of threads handling network requests
-num.network.threads=2
-
-# The number of threads doing disk I/O
-num.io.threads=8
-
-# The send buffer (SO_SNDBUF) used by the socket server
-socket.send.buffer.bytes=1048576
-
-# The receive buffer (SO_RCVBUF) used by the socket server
-socket.receive.buffer.bytes=1048576
-
-# The maximum size of a request that the socket server will accept (protection against OOM)
-socket.request.max.bytes=104857600
-
-
-############################# Log Basics #############################
-
-# A comma-separated list of directories under which to store log files
-log.dirs=KAFKA_DATADIR
-
-# The default number of log partitions per topic. More partitions allow greater
-# parallelism for consumption, but this will also result in more files across
-# the brokers.
-num.partitions=2
-
-# Create new topics with a replication factor of 2 so failover can be tested
-# more easily.
-default.replication.factor=2
-
-auto.create.topics.enable=false
-delete.topic.enable=true
-
-############################# Log Flush Policy #############################
-
-# Messages are immediately written to the filesystem but by default we only fsync() to sync
-# the OS cache lazily. The following configurations control the flush of data to disk.
-# There are a few important trade-offs here:
-#    1. Durability: Unflushed data may be lost if you are not using replication.
-#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
-#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
-# The settings below allow one to configure the flush policy to flush data after a period of time or
-# every N messages (or both). This can be done globally and overridden on a per-topic basis.
-
-# The number of messages to accept before forcing a flush of data to disk
-#log.flush.interval.messages=10000
-
-# The maximum amount of time a message can sit in a log before we force a flush
-#log.flush.interval.ms=1000
-
-############################# Log Retention Policy #############################
-
-# The following configurations control the disposal of log segments. The policy can
-# be set to delete segments after a period of time, or after a given size has accumulated.
-# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
-# from the end of the log.
-
-# The minimum age of a log file to be eligible for deletion
-log.retention.hours=168
-
-# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining
-# segments don't drop below log.retention.bytes.
-log.retention.bytes=268435456
-
-# The maximum size of a log segment file. When this size is reached a new log segment will be created.
-log.segment.bytes=268435456
-
-# The interval at which log segments are checked to see if they can be deleted according
-# to the retention policies
-log.retention.check.interval.ms=60000
-
-# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
-# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
-log.cleaner.enable=false
-
-############################# Zookeeper #############################
-
-# Zookeeper connection string (see zookeeper docs for details).
-# This is a comma-separated list of host:port pairs, each corresponding to a zk
-# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
-# You can also append an optional chroot string to the urls to specify the
-# root directory for all kafka znodes.
-zookeeper.connect=localhost:ZK_PORT
-
-# Timeout in ms for connecting to zookeeper
-zookeeper.session.timeout.ms=3000
-zookeeper.connection.timeout.ms=3000
diff --git a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh b/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh
deleted file mode 100755
index 81d8ea05d3..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/setup_services.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/sh
-
-set -ex
-
-stop toxiproxy || true
-cp ${REPOSITORY_ROOT}/vagrant/toxiproxy.conf /etc/init/toxiproxy.conf
-cp ${REPOSITORY_ROOT}/vagrant/run_toxiproxy.sh ${KAFKA_INSTALL_ROOT}/
-start toxiproxy
-
-for i in 1 2 3 4 5; do
-    ZK_PORT=`expr $i + 2180`
-    KAFKA_PORT=`expr $i + 9090`
-
-    stop zookeeper-${ZK_PORT} || true
-
-    # set up zk service
-    cp ${REPOSITORY_ROOT}/vagrant/zookeeper.conf /etc/init/zookeeper-${ZK_PORT}.conf
-    sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/zookeeper-${ZK_PORT}.conf
-
-    # set up kafka service
-    cp ${REPOSITORY_ROOT}/vagrant/kafka.conf /etc/init/kafka-${KAFKA_PORT}.conf
-    sed -i s/KAFKAID/${KAFKA_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
-    sed -i s/ZK_PORT/${ZK_PORT}/g /etc/init/kafka-${KAFKA_PORT}.conf
-
-    start zookeeper-${ZK_PORT}
-done
-
-# Wait for the last kafka node to finish booting
-while ! nc -q 1 localhost 29095 </dev/null; do echo "Waiting"; sleep 1; done
diff --git a/vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf b/vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf
deleted file mode 100644
index cc1226a31c..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/toxiproxy.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-start on started networking
-stop on shutdown
-
-env KAFKA_INSTALL_ROOT=/opt
-
-exec /opt/run_toxiproxy.sh
diff --git a/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf b/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf
deleted file mode 100644
index 5a63e281cb..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/zookeeper.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-start on started toxiproxy
-stop on stopping toxiproxy
-
-script
-  export KAFKA_HEAP_OPTS="-Xmx192m"
-  exec /opt/kafka-KAFKAID/bin/zookeeper-server-start.sh /opt/kafka-KAFKAID/config/zookeeper.properties
-end script
diff --git a/vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties b/vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties
deleted file mode 100644
index 43b0f62f50..0000000000
--- a/vendor/github.com/Shopify/sarama/vagrant/zookeeper.properties
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# the directory where the snapshot is stored.
-dataDir=ZK_DATADIR
-# the port at which the clients will connect
-clientPort=ZK_PORT
-# disable the per-ip limit on the number of connections since this is a non-production config
-maxClientCnxns=0
-
-# The number of milliseconds of each tick
-tickTime=2000
-
-# The number of ticks that the initial synchronization phase can take
-initLimit=10
-
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit=5
-
-server.1=localhost:2281:2381
-server.2=localhost:2282:2382
-server.3=localhost:2283:2383
-server.4=localhost:2284:2384
-server.5=localhost:2285:2385
diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf
index 0b0de54923..bb7718bc42 100644
--- a/vendor/github.com/docker/docker/vendor.conf
+++ b/vendor/github.com/docker/docker/vendor.conf
@@ -34,6 +34,7 @@ github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870
 github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef
 github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
 github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457
+github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
 github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d
 github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707
diff --git a/vendor/github.com/pierrec/lz4/lz4c/main.go b/vendor/github.com/pierrec/lz4/lz4c/main.go
deleted file mode 100644
index 048ab5004f..0000000000
--- a/vendor/github.com/pierrec/lz4/lz4c/main.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Command line utility for the lz4 package.
-package main
-
-import (
-	// 	"bytes"
-
-	"flag"
-	"fmt"
-	"io"
-	"log"
-	"os"
-	"path"
-	"runtime"
-	"strings"
-
-	"github.com/pierrec/lz4"
-)
-
-func main() {
-	// Process command line arguments
-	var (
-		blockMaxSizeDefault = 4 << 20
-		flagStdout          = flag.Bool("c", false, "output to stdout")
-		flagDecompress      = flag.Bool("d", false, "decompress flag")
-		flagBlockMaxSize    = flag.Int("B", blockMaxSizeDefault, "block max size [64Kb,256Kb,1Mb,4Mb]")
-		flagBlockDependency = flag.Bool("BD", false, "enable block dependency")
-		flagBlockChecksum   = flag.Bool("BX", false, "enable block checksum")
-		flagStreamChecksum  = flag.Bool("Sx", false, "disable stream checksum")
-		flagHighCompression = flag.Bool("9", false, "enable high compression")
-	)
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "Usage:\n\t%s [arg] [input]...\n\tNo input means [de]compress stdin to stdout\n\n", os.Args[0])
-		flag.PrintDefaults()
-	}
-	flag.Parse()
-
-	// Use all CPUs
-	runtime.GOMAXPROCS(runtime.NumCPU())
-
-	zr := lz4.NewReader(nil)
-	zw := lz4.NewWriter(nil)
-	zh := lz4.Header{
-		BlockDependency: *flagBlockDependency,
-		BlockChecksum:   *flagBlockChecksum,
-		BlockMaxSize:    *flagBlockMaxSize,
-		NoChecksum:      *flagStreamChecksum,
-		HighCompression: *flagHighCompression,
-	}
-
-	worker := func(in io.Reader, out io.Writer) {
-		if *flagDecompress {
-			zr.Reset(in)
-			if _, err := io.Copy(out, zr); err != nil {
-				log.Fatalf("Error while decompressing input: %v", err)
-			}
-		} else {
-			zw.Reset(out)
-			zw.Header = zh
-			if _, err := io.Copy(zw, in); err != nil {
-				log.Fatalf("Error while compressing input: %v", err)
-			}
-			if err := zw.Close(); err != nil {
-				log.Fatalf("Error while closing stream: %v", err)
-			}
-		}
-	}
-
-	// No input means [de]compress stdin to stdout
-	if len(flag.Args()) == 0 {
-		worker(os.Stdin, os.Stdout)
-		os.Exit(0)
-	}
-
-	// Compress or decompress all input files
-	for _, inputFileName := range flag.Args() {
-		outputFileName := path.Clean(inputFileName)
-
-		if !*flagStdout {
-			if *flagDecompress {
-				outputFileName = strings.TrimSuffix(outputFileName, lz4.Extension)
-				if outputFileName == inputFileName {
-					log.Fatalf("Invalid output file name: same as input: %s", inputFileName)
-				}
-			} else {
-				outputFileName += lz4.Extension
-			}
-		}
-
-		inputFile, err := os.Open(inputFileName)
-		if err != nil {
-			log.Fatalf("Error while opening input: %v", err)
-		}
-
-		outputFile := os.Stdout
-		if !*flagStdout {
-			outputFile, err = os.Create(outputFileName)
-			if err != nil {
-				log.Fatalf("Error while opening output: %v", err)
-			}
-		}
-		worker(inputFile, outputFile)
-
-		inputFile.Close()
-		if !*flagStdout {
-			outputFile.Close()
-		}
-	}
-}
diff --git a/vendor/github.com/pierrec/xxHash/xxHash64/example_test.go b/vendor/github.com/pierrec/xxHash/xxHash64/example_test.go
deleted file mode 100644
index c5fa565982..0000000000
--- a/vendor/github.com/pierrec/xxHash/xxHash64/example_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package xxHash64_test
-
-import (
-	"bytes"
-	"fmt"
-	"github.com/pierrec/xxHash/xxHash64"
-)
-
-func ExampleNew() {
-	buf := bytes.NewBufferString("this is a test")
-	x := xxHash64.New(0xCAFE)
-	x.Write(buf.Bytes())
-	fmt.Printf("%x\n", x.Sum64())
-	// Output: 4228c3215949e862
-}
-
-func ExampleChecksum() {
-	buf := bytes.NewBufferString("this is a test")
-	fmt.Printf("%x\n", xxHash64.Checksum(buf.Bytes(), 0xCAFE))
-	// Output: 4228c3215949e862
-}
diff --git a/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go b/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go
deleted file mode 100644
index 2788e95069..0000000000
--- a/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Package xxHash64 implements the very fast xxHash hashing algorithm (64 bits version).
-// (https://github.com/Cyan4973/xxHash/)
-package xxHash64
-
-import "hash"
-
-const (
-	prime64_1 = 11400714785074694791
-	prime64_2 = 14029467366897019727
-	prime64_3 = 1609587929392839161
-	prime64_4 = 9650029242287828579
-	prime64_5 = 2870177450012600261
-)
-
-type xxHash struct {
-	seed     uint64
-	v1       uint64
-	v2       uint64
-	v3       uint64
-	v4       uint64
-	totalLen uint64
-	buf      [32]byte
-	bufused  int
-}
-
-// New returns a new Hash64 instance.
-func New(seed uint64) hash.Hash64 {
-	xxh := &xxHash{seed: seed}
-	xxh.Reset()
-	return xxh
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh xxHash) Sum(b []byte) []byte {
-	h64 := xxh.Sum64()
-	return append(b, byte(h64), byte(h64>>8), byte(h64>>16), byte(h64>>24), byte(h64>>32), byte(h64>>40), byte(h64>>48), byte(h64>>56))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *xxHash) Reset() {
-	xxh.v1 = xxh.seed + prime64_1 + prime64_2
-	xxh.v2 = xxh.seed + prime64_2
-	xxh.v3 = xxh.seed
-	xxh.v4 = xxh.seed - prime64_1
-	xxh.totalLen = 0
-	xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *xxHash) Size() int {
-	return 8
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *xxHash) BlockSize() int {
-	return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *xxHash) Write(input []byte) (int, error) {
-	n := len(input)
-	m := xxh.bufused
-
-	xxh.totalLen += uint64(n)
-
-	r := len(xxh.buf) - m
-	if n < r {
-		copy(xxh.buf[m:], input)
-		xxh.bufused += len(input)
-		return n, nil
-	}
-
-	p := 0
-	if m > 0 {
-		// some data left from previous update
-		copy(xxh.buf[xxh.bufused:], input[:r])
-		xxh.bufused += len(input) - r
-
-		// fast rotl(31)
-		p64 := xxh.v1 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2
-		xxh.v1 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v2 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2
-		xxh.v2 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v3 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2
-		xxh.v3 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v4 + (uint64(xxh.buf[p+7])<<56|uint64(xxh.buf[p+6])<<48|uint64(xxh.buf[p+5])<<40|uint64(xxh.buf[p+4])<<32|uint64(xxh.buf[p+3])<<24|uint64(xxh.buf[p+2])<<16|uint64(xxh.buf[p+1])<<8|uint64(xxh.buf[p]))*prime64_2
-		xxh.v4 = (p64<<31 | p64>>33) * prime64_1
-
-		p = r
-		xxh.bufused = 0
-	}
-
-	for n := n - 32; p <= n; {
-		p64 := xxh.v1 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-		xxh.v1 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v2 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-		xxh.v2 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v3 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-		xxh.v3 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-		p64 = xxh.v4 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-		xxh.v4 = (p64<<31 | p64>>33) * prime64_1
-		p += 8
-	}
-
-	copy(xxh.buf[xxh.bufused:], input[p:])
-	xxh.bufused += len(input) - p
-
-	return n, nil
-}
-
-// Sum64 returns the 64bits Hash value.
-func (xxh *xxHash) Sum64() uint64 {
-	var h64 uint64
-	if xxh.totalLen >= 32 {
-		h64 = ((xxh.v1 << 1) | (xxh.v1 >> 63)) +
-			((xxh.v2 << 7) | (xxh.v2 >> 57)) +
-			((xxh.v3 << 12) | (xxh.v3 >> 52)) +
-			((xxh.v4 << 18) | (xxh.v4 >> 46))
-
-		xxh.v1 *= prime64_2
-		h64 ^= ((xxh.v1 << 31) | (xxh.v1 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		xxh.v2 *= prime64_2
-		h64 ^= ((xxh.v2 << 31) | (xxh.v2 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		xxh.v3 *= prime64_2
-		h64 ^= ((xxh.v3 << 31) | (xxh.v3 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		xxh.v4 *= prime64_2
-		h64 ^= ((xxh.v4 << 31) | (xxh.v4 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4 + xxh.totalLen
-	} else {
-		h64 = xxh.seed + prime64_5 + xxh.totalLen
-	}
-
-	p := 0
-	n := xxh.bufused
-	for n := n - 8; p <= n; p += 8 {
-		p64 := (uint64(xxh.buf[p+7])<<56 | uint64(xxh.buf[p+6])<<48 | uint64(xxh.buf[p+5])<<40 | uint64(xxh.buf[p+4])<<32 | uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_2
-		h64 ^= ((p64 << 31) | (p64 >> 33)) * prime64_1
-		h64 = ((h64<<27)|(h64>>37))*prime64_1 + prime64_4
-	}
-	if p+4 <= n {
-		h64 ^= (uint64(xxh.buf[p+3])<<24 | uint64(xxh.buf[p+2])<<16 | uint64(xxh.buf[p+1])<<8 | uint64(xxh.buf[p])) * prime64_1
-		h64 = ((h64<<23)|(h64>>41))*prime64_2 + prime64_3
-		p += 4
-	}
-	for ; p < n; p++ {
-		h64 ^= uint64(xxh.buf[p]) * prime64_5
-		h64 = ((h64 << 11) | (h64 >> 53)) * prime64_1
-	}
-
-	h64 ^= h64 >> 33
-	h64 *= prime64_2
-	h64 ^= h64 >> 29
-	h64 *= prime64_3
-	h64 ^= h64 >> 32
-
-	return h64
-}
-
-// Checksum returns the 64bits Hash value.
-func Checksum(input []byte, seed uint64) uint64 {
-	n := len(input)
-	var h64 uint64
-
-	if n >= 32 {
-		v1 := seed + prime64_1 + prime64_2
-		v2 := seed + prime64_2
-		v3 := seed
-		v4 := seed - prime64_1
-		p := 0
-		for n := n - 32; p <= n; {
-			p64 := v1 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-			v1 = (p64<<31 | p64>>33) * prime64_1
-			p += 8
-			p64 = v2 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-			v2 = (p64<<31 | p64>>33) * prime64_1
-			p += 8
-			p64 = v3 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-			v3 = (p64<<31 | p64>>33) * prime64_1
-			p += 8
-			p64 = v4 + (uint64(input[p+7])<<56|uint64(input[p+6])<<48|uint64(input[p+5])<<40|uint64(input[p+4])<<32|uint64(input[p+3])<<24|uint64(input[p+2])<<16|uint64(input[p+1])<<8|uint64(input[p]))*prime64_2
-			v4 = (p64<<31 | p64>>33) * prime64_1
-			p += 8
-		}
-
-		h64 = ((v1 << 1) | (v1 >> 63)) +
-			((v2 << 7) | (v2 >> 57)) +
-			((v3 << 12) | (v3 >> 52)) +
-			((v4 << 18) | (v4 >> 46))
-
-		v1 *= prime64_2
-		h64 ^= ((v1 << 31) | (v1 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		v2 *= prime64_2
-		h64 ^= ((v2 << 31) | (v2 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		v3 *= prime64_2
-		h64 ^= ((v3 << 31) | (v3 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4
-
-		v4 *= prime64_2
-		h64 ^= ((v4 << 31) | (v4 >> 33)) * prime64_1
-		h64 = h64*prime64_1 + prime64_4 + uint64(n)
-
-		input = input[p:]
-		n -= p
-	} else {
-		h64 = seed + prime64_5 + uint64(n)
-	}
-
-	p := 0
-	for n := n - 8; p <= n; p += 8 {
-		p64 := (uint64(input[p+7])<<56 | uint64(input[p+6])<<48 | uint64(input[p+5])<<40 | uint64(input[p+4])<<32 | uint64(input[p+3])<<24 | uint64(input[p+2])<<16 | uint64(input[p+1])<<8 | uint64(input[p])) * prime64_2
-		h64 ^= ((p64 << 31) | (p64 >> 33)) * prime64_1
-		h64 = ((h64<<27)|(h64>>37))*prime64_1 + prime64_4
-	}
-	if p+4 <= n {
-		h64 ^= (uint64(input[p+3])<<24 | uint64(input[p+2])<<16 | uint64(input[p+1])<<8 | uint64(input[p])) * prime64_1
-		h64 = ((h64<<23)|(h64>>41))*prime64_2 + prime64_3
-		p += 4
-	}
-	for ; p < n; p++ {
-		h64 ^= uint64(input[p]) * prime64_5
-		h64 = ((h64 << 11) | (h64 >> 53)) * prime64_1
-	}
-
-	h64 ^= h64 >> 33
-	h64 *= prime64_2
-	h64 ^= h64 >> 29
-	h64 *= prime64_3
-	h64 ^= h64 >> 32
-
-	return h64
-}
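
As the example test earlier in this patch already hints, the streaming hasher and the one-shot Checksum helper in this deleted package are interchangeable for the same seed and input; a small sketch:

package main

import (
	"fmt"

	"github.com/pierrec/xxHash/xxHash64"
)

func main() {
	data := []byte("this is a test")

	// Streaming form: New + Write + Sum64.
	h := xxHash64.New(0xCAFE)
	h.Write(data)

	// One-shot form: Checksum with the same seed; both print the
	// same 64-bit value (4228c3215949e862 for this input).
	fmt.Printf("%x\n", h.Sum64())
	fmt.Printf("%x\n", xxHash64.Checksum(data, 0xCAFE))
}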
diff --git a/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64_test.go b/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64_test.go
deleted file mode 100644
index e00a4ef4ca..0000000000
--- a/vendor/github.com/pierrec/xxHash/xxHash64/xxHash64_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package xxHash64_test
-
-import (
-	"encoding/binary"
-	"hash/crc64"
-	"hash/fnv"
-	"testing"
-
-	"github.com/pierrec/xxHash/xxHash64"
-)
-
-type test struct {
-	sum             uint64
-	data, printable string
-}
-
-var testdata = []test{
-	{0xef46db3751d8e999, "", ""},
-	{0xd24ec4f1a98c6e5b, "a", ""},
-	{0x65f708ca92d04a61, "ab", ""},
-	{0x44bc2cf5ad770999, "abc", ""},
-	{0xde0327b0d25d92cc, "abcd", ""},
-	{0x07e3670c0c8dc7eb, "abcde", ""},
-	{0xfa8afd82c423144d, "abcdef", ""},
-	{0x1860940e2902822d, "abcdefg", ""},
-	{0x3ad351775b4634b7, "abcdefgh", ""},
-	{0x27f1a34fdbb95e13, "abcdefghi", ""},
-	{0xd6287a1de5498bb2, "abcdefghij", ""},
-	{0xbf2cd639b4143b80, "abcdefghijklmnopqrstuvwxyz012345", ""},
-	{0x64f23ecf1609b766, "abcdefghijklmnopqrstuvwxyz0123456789", ""},
-	{0xc5a8b11443765630, "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.", ""},
-}
-
-func init() {
-	for i := range testdata {
-		d := &testdata[i]
-		if len(d.data) > 20 {
-			d.printable = d.data[:20]
-		} else {
-			d.printable = d.data
-		}
-	}
-}
-
-func TestBlockSize(t *testing.T) {
-	xxh := xxHash64.New(0)
-	if s := xxh.BlockSize(); s <= 0 {
-		t.Errorf("invalid BlockSize: %d", s)
-	}
-}
-
-func TestSize(t *testing.T) {
-	xxh := xxHash64.New(0)
-	if s := xxh.Size(); s != 8 {
-		t.Errorf("invalid Size: got %d expected 8", s)
-	}
-}
-
-func TestData(t *testing.T) {
-	for i, td := range testdata {
-		xxh := xxHash64.New(0)
-		data := []byte(td.data)
-		xxh.Write(data)
-		if h := xxh.Sum64(); h != td.sum {
-			t.Errorf("test %d: xxh64(%s)=0x%x expected 0x%x", i, td.printable, h, td.sum)
-			t.FailNow()
-		}
-		if h := xxHash64.Checksum(data, 0); h != td.sum {
-			t.Errorf("test %d: xxh64(%s)=0x%x expected 0x%x", i, td.printable, h, td.sum)
-			t.FailNow()
-		}
-	}
-}
-
-func TestSplitData(t *testing.T) {
-	for i, td := range testdata {
-		xxh := xxHash64.New(0)
-		data := []byte(td.data)
-		l := len(data) / 2
-		xxh.Write(data[0:l])
-		xxh.Write(data[l:])
-		h := xxh.Sum64()
-		if h != td.sum {
-			t.Errorf("test %d: xxh64(%s)=0x%x expected 0x%x", i, td.printable, h, td.sum)
-			t.FailNow()
-		}
-	}
-}
-
-func TestSum(t *testing.T) {
-	for i, td := range testdata {
-		xxh := xxHash64.New(0)
-		data := []byte(td.data)
-		xxh.Write(data)
-		b := xxh.Sum(data)
-		if h := binary.LittleEndian.Uint64(b[len(data):]); h != td.sum {
-			t.Errorf("test %d: xxh64(%s)=0x%x expected 0x%x", i, td.printable, h, td.sum)
-			t.FailNow()
-		}
-	}
-}
-
-func TestReset(t *testing.T) {
-	xxh := xxHash64.New(0)
-	for i, td := range testdata {
-		xxh.Write([]byte(td.data))
-		h := xxh.Sum64()
-		if h != td.sum {
-			t.Errorf("test %d: xxh64(%s)=0x%x expected 0x%x", i, td.printable, h, td.sum)
-			t.FailNow()
-		}
-		xxh.Reset()
-	}
-}
-
-///////////////////////////////////////////////////////////////////////////////
-// Benchmarks
-//
-var testdata1 = []byte(testdata[len(testdata)-1].data)
-
-func Benchmark_XXH64(b *testing.B) {
-	h := xxHash64.New(0)
-	for n := 0; n < b.N; n++ {
-		h.Write(testdata1)
-		h.Sum64()
-		h.Reset()
-	}
-}
-
-func Benchmark_XXH64_Checksum(b *testing.B) {
-	for n := 0; n < b.N; n++ {
-		xxHash64.Checksum(testdata1, 0)
-	}
-}
-
-func Benchmark_CRC64(b *testing.B) {
-	t := crc64.MakeTable(0)
-	for i := 0; i < b.N; i++ {
-		crc64.Checksum(testdata1, t)
-	}
-}
-
-func Benchmark_Fnv64(b *testing.B) {
-	h := fnv.New64()
-	for i := 0; i < b.N; i++ {
-		h.Write(testdata1)
-		h.Sum64()
-		h.Reset()
-	}
-}
diff --git a/vendor/github.com/pierrec/xxHash/xxhsum/main.go b/vendor/github.com/pierrec/xxHash/xxhsum/main.go
deleted file mode 100644
index b73158bd9c..0000000000
--- a/vendor/github.com/pierrec/xxHash/xxhsum/main.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Command line interface to the xxHash32 and xxHash64 packages.
-// Usage:
-// 	xxHash [-mode 0] [-seed 123] filename1 [filename2...]
-// where
-//  mode: hash mode (0=32bits, 1=64bits) (default=1)
-//  seed: seed to be used (default=0)
-package main
-
-import (
-	"flag"
-	"fmt"
-	"hash"
-	"io"
-	"os"
-
-	"github.com/pierrec/xxHash/xxHash32"
-	"github.com/pierrec/xxHash/xxHash64"
-)
-
-func main() {
-	seed := flag.Uint64("seed", 0, "seed value")
-	mode := flag.Int("mode", 1, "hash mode: 0=32bits, 1=64bits")
-	flag.Parse()
-
-	var xxh hash.Hash
-	if *mode == 0 {
-		xxh = xxHash32.New(uint32(*seed))
-	} else {
-		xxh = xxHash64.New(*seed)
-	}
-
-	// Process each file in sequence
-	for _, filename := range flag.Args() {
-		inputFile, err := os.Open(filename)
-		if err != nil {
-			continue
-		}
-		if _, err := io.Copy(xxh, inputFile); err == nil {
-			fmt.Printf("%x %s\n", xxh.Sum(nil), filename)
-		}
-		inputFile.Close()
-		xxh.Reset()
-	}
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go b/vendor/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go
deleted file mode 100644
index dddaf4b126..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/cmd/metrics-bench/metrics-bench.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"github.com/rcrowley/go-metrics"
-	"time"
-)
-
-func main() {
-	r := metrics.NewRegistry()
-	for i := 0; i < 10000; i++ {
-		r.Register(fmt.Sprintf("counter-%d", i), metrics.NewCounter())
-		r.Register(fmt.Sprintf("gauge-%d", i), metrics.NewGauge())
-		r.Register(fmt.Sprintf("gaugefloat64-%d", i), metrics.NewGaugeFloat64())
-		r.Register(fmt.Sprintf("histogram-uniform-%d", i), metrics.NewHistogram(metrics.NewUniformSample(1028)))
-		r.Register(fmt.Sprintf("histogram-exp-%d", i), metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015)))
-		r.Register(fmt.Sprintf("meter-%d", i), metrics.NewMeter())
-	}
-	time.Sleep(600e9)
-}
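
metrics-bench only registers metrics and sleeps; reading values back out goes through Registry.Each with a type switch, the same pattern the exp and stathat reporters removed later in this patch rely on. A minimal sketch:

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	c := metrics.NewCounter()
	r.Register("requests", c)
	c.Inc(3)

	// Walk every registered metric and switch on its concrete type.
	r.Each(func(name string, i interface{}) {
		switch m := i.(type) {
		case metrics.Counter:
			fmt.Println(name, m.Count())
		}
	})
}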
diff --git a/vendor/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go b/vendor/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go
deleted file mode 100644
index 66f42c0468..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/cmd/metrics-example/metrics-example.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package main
-
-import (
-	"errors"
-	"github.com/rcrowley/go-metrics"
-	// "github.com/rcrowley/go-metrics/stathat"
-	"log"
-	"math/rand"
-	"os"
-	// "syslog"
-	"time"
-)
-
-const fanout = 10
-
-func main() {
-
-	r := metrics.NewRegistry()
-
-	c := metrics.NewCounter()
-	r.Register("foo", c)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				c.Dec(19)
-				time.Sleep(300e6)
-			}
-		}()
-		go func() {
-			for {
-				c.Inc(47)
-				time.Sleep(400e6)
-			}
-		}()
-	}
-
-	g := metrics.NewGauge()
-	r.Register("bar", g)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				g.Update(19)
-				time.Sleep(300e6)
-			}
-		}()
-		go func() {
-			for {
-				g.Update(47)
-				time.Sleep(400e6)
-			}
-		}()
-	}
-
-	gf := metrics.NewGaugeFloat64()
-	r.Register("barfloat64", gf)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				gf.Update(19.0)
-				time.Sleep(300e6)
-			}
-		}()
-		go func() {
-			for {
-				gf.Update(47.0)
-				time.Sleep(400e6)
-			}
-		}()
-	}
-
-	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
-		if 0 < rand.Intn(2) {
-			h.Healthy()
-		} else {
-			h.Unhealthy(errors.New("baz"))
-		}
-	})
-	r.Register("baz", hc)
-
-	s := metrics.NewExpDecaySample(1028, 0.015)
-	//s := metrics.NewUniformSample(1028)
-	h := metrics.NewHistogram(s)
-	r.Register("bang", h)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				h.Update(19)
-				time.Sleep(300e6)
-			}
-		}()
-		go func() {
-			for {
-				h.Update(47)
-				time.Sleep(400e6)
-			}
-		}()
-	}
-
-	m := metrics.NewMeter()
-	r.Register("quux", m)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				m.Mark(19)
-				time.Sleep(300e6)
-			}
-		}()
-		go func() {
-			for {
-				m.Mark(47)
-				time.Sleep(400e6)
-			}
-		}()
-	}
-
-	t := metrics.NewTimer()
-	r.Register("hooah", t)
-	for i := 0; i < fanout; i++ {
-		go func() {
-			for {
-				t.Time(func() { time.Sleep(300e6) })
-			}
-		}()
-		go func() {
-			for {
-				t.Time(func() { time.Sleep(400e6) })
-			}
-		}()
-	}
-
-	metrics.RegisterDebugGCStats(r)
-	go metrics.CaptureDebugGCStats(r, 5e9)
-
-	metrics.RegisterRuntimeMemStats(r)
-	go metrics.CaptureRuntimeMemStats(r, 5e9)
-
-	metrics.Log(r, 60e9, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
-
-	/*
-		w, err := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
-		if nil != err { log.Fatalln(err) }
-		metrics.Syslog(r, 60e9, w)
-	*/
-
-	/*
-		addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
-		metrics.Graphite(r, 10e9, "metrics", addr)
-	*/
-
-	/*
-		stathat.Stathat(r, 10e9, "example@example.com")
-	*/
-
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go b/vendor/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go
deleted file mode 100644
index dc175b778e..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/cmd/never-read/never-read.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package main
-
-import (
-	"log"
-	"net"
-)
-
-func main() {
-	addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
-	l, err := net.ListenTCP("tcp", addr)
-	if nil != err {
-		log.Fatalln(err)
-	}
-	log.Println("listening", l.Addr())
-	for {
-		c, err := l.AcceptTCP()
-		if nil != err {
-			log.Fatalln(err)
-		}
-		log.Println("accepted", c.RemoteAddr())
-	}
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/exp/exp.go b/vendor/github.com/rcrowley/go-metrics/exp/exp.go
deleted file mode 100644
index 11dd3f898a..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/exp/exp.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Hook go-metrics into expvar
-// on any /debug/metrics request, load all vars from the registry into expvar, and execute regular expvar handler
-package exp
-
-import (
-	"expvar"
-	"fmt"
-	"net/http"
-	"sync"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-type exp struct {
-	expvarLock sync.Mutex // expvar panics if you try to register the same var twice, so we must probe it safely
-	registry   metrics.Registry
-}
-
-func (exp *exp) expHandler(w http.ResponseWriter, r *http.Request) {
-	// load our variables into expvar
-	exp.syncToExpvar()
-
-	// now just run the official expvar handler code (which is not publicly callable, so pasted inline)
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	fmt.Fprintf(w, "{\n")
-	first := true
-	expvar.Do(func(kv expvar.KeyValue) {
-		if !first {
-			fmt.Fprintf(w, ",\n")
-		}
-		first = false
-		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
-	})
-	fmt.Fprintf(w, "\n}\n")
-}
-
-// Exp will register an expvar powered metrics handler with http.DefaultServeMux on "/debug/vars"
-func Exp(r metrics.Registry) {
-	h := ExpHandler(r)
-	// this would cause a panic:
-	// panic: http: multiple registrations for /debug/vars
-	// http.HandleFunc("/debug/vars", e.expHandler)
-	// haven't found an elegant way, so just use a different endpoint
-	http.Handle("/debug/metrics", h)
-}
-
-// ExpHandler will return an expvar powered metrics handler.
-func ExpHandler(r metrics.Registry) http.Handler {
-	e := exp{sync.Mutex{}, r}
-	return http.HandlerFunc(e.expHandler)
-}
-
-func (exp *exp) getInt(name string) *expvar.Int {
-	var v *expvar.Int
-	exp.expvarLock.Lock()
-	p := expvar.Get(name)
-	if p != nil {
-		v = p.(*expvar.Int)
-	} else {
-		v = new(expvar.Int)
-		expvar.Publish(name, v)
-	}
-	exp.expvarLock.Unlock()
-	return v
-}
-
-func (exp *exp) getFloat(name string) *expvar.Float {
-	var v *expvar.Float
-	exp.expvarLock.Lock()
-	p := expvar.Get(name)
-	if p != nil {
-		v = p.(*expvar.Float)
-	} else {
-		v = new(expvar.Float)
-		expvar.Publish(name, v)
-	}
-	exp.expvarLock.Unlock()
-	return v
-}
-
-func (exp *exp) publishCounter(name string, metric metrics.Counter) {
-	v := exp.getInt(name)
-	v.Set(metric.Count())
-}
-
-func (exp *exp) publishGauge(name string, metric metrics.Gauge) {
-	v := exp.getInt(name)
-	v.Set(metric.Value())
-}
-func (exp *exp) publishGaugeFloat64(name string, metric metrics.GaugeFloat64) {
-	exp.getFloat(name).Set(metric.Value())
-}
-
-func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
-	h := metric.Snapshot()
-	ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-	exp.getInt(name + ".count").Set(h.Count())
-	exp.getFloat(name + ".min").Set(float64(h.Min()))
-	exp.getFloat(name + ".max").Set(float64(h.Max()))
-	exp.getFloat(name + ".mean").Set(float64(h.Mean()))
-	exp.getFloat(name + ".std-dev").Set(float64(h.StdDev()))
-	exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
-	exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
-	exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
-	exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
-	exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
-}
-
-func (exp *exp) publishMeter(name string, metric metrics.Meter) {
-	m := metric.Snapshot()
-	exp.getInt(name + ".count").Set(m.Count())
-	exp.getFloat(name + ".one-minute").Set(float64(m.Rate1()))
-	exp.getFloat(name + ".five-minute").Set(float64(m.Rate5()))
-	exp.getFloat(name + ".fifteen-minute").Set(float64((m.Rate15())))
-	exp.getFloat(name + ".mean").Set(float64(m.RateMean()))
-}
-
-func (exp *exp) publishTimer(name string, metric metrics.Timer) {
-	t := metric.Snapshot()
-	ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-	exp.getInt(name + ".count").Set(t.Count())
-	exp.getFloat(name + ".min").Set(float64(t.Min()))
-	exp.getFloat(name + ".max").Set(float64(t.Max()))
-	exp.getFloat(name + ".mean").Set(float64(t.Mean()))
-	exp.getFloat(name + ".std-dev").Set(float64(t.StdDev()))
-	exp.getFloat(name + ".50-percentile").Set(float64(ps[0]))
-	exp.getFloat(name + ".75-percentile").Set(float64(ps[1]))
-	exp.getFloat(name + ".95-percentile").Set(float64(ps[2]))
-	exp.getFloat(name + ".99-percentile").Set(float64(ps[3]))
-	exp.getFloat(name + ".999-percentile").Set(float64(ps[4]))
-	exp.getFloat(name + ".one-minute").Set(float64(t.Rate1()))
-	exp.getFloat(name + ".five-minute").Set(float64(t.Rate5()))
-	exp.getFloat(name + ".fifteen-minute").Set(float64((t.Rate15())))
-	exp.getFloat(name + ".mean-rate").Set(float64(t.RateMean()))
-}
-
-func (exp *exp) syncToExpvar() {
-	exp.registry.Each(func(name string, i interface{}) {
-		switch i.(type) {
-		case metrics.Counter:
-			exp.publishCounter(name, i.(metrics.Counter))
-		case metrics.Gauge:
-			exp.publishGauge(name, i.(metrics.Gauge))
-		case metrics.GaugeFloat64:
-			exp.publishGaugeFloat64(name, i.(metrics.GaugeFloat64))
-		case metrics.Histogram:
-			exp.publishHistogram(name, i.(metrics.Histogram))
-		case metrics.Meter:
-			exp.publishMeter(name, i.(metrics.Meter))
-		case metrics.Timer:
-			exp.publishTimer(name, i.(metrics.Timer))
-		default:
-			panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))
-		}
-	})
-}
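
Consumers of the deleted exp package wired it up through the package-level Exp helper (or ExpHandler on a custom mux); a minimal sketch, assuming the exported metrics.DefaultRegistry:

package main

import (
	"log"
	"net/http"

	"github.com/rcrowley/go-metrics"
	"github.com/rcrowley/go-metrics/exp"
)

func main() {
	// Registers the handler on http.DefaultServeMux under /debug/metrics
	// (not /debug/vars, to avoid clashing with the stock expvar handler).
	exp.Exp(metrics.DefaultRegistry)
	log.Fatal(http.ListenAndServe(":8080", nil))
}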
diff --git a/vendor/github.com/rcrowley/go-metrics/librato/client.go b/vendor/github.com/rcrowley/go-metrics/librato/client.go
deleted file mode 100644
index 8c0c850e38..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/librato/client.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package librato
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-)
-
-const Operations = "operations"
-const OperationsShort = "ops"
-
-type LibratoClient struct {
-	Email, Token string
-}
-
-// property strings
-const (
-	// display attributes
-	Color             = "color"
-	DisplayMax        = "display_max"
-	DisplayMin        = "display_min"
-	DisplayUnitsLong  = "display_units_long"
-	DisplayUnitsShort = "display_units_short"
-	DisplayStacked    = "display_stacked"
-	DisplayTransform  = "display_transform"
-	// special gauge display attributes
-	SummarizeFunction = "summarize_function"
-	Aggregate         = "aggregate"
-
-	// metric keys
-	Name        = "name"
-	Period      = "period"
-	Description = "description"
-	DisplayName = "display_name"
-	Attributes  = "attributes"
-
-	// measurement keys
-	MeasureTime = "measure_time"
-	Source      = "source"
-	Value       = "value"
-
-	// special gauge keys
-	Count      = "count"
-	Sum        = "sum"
-	Max        = "max"
-	Min        = "min"
-	SumSquares = "sum_squares"
-
-	// batch keys
-	Counters = "counters"
-	Gauges   = "gauges"
-
-	MetricsPostUrl = "https://metrics-api.librato.com/v1/metrics"
-)
-
-type Measurement map[string]interface{}
-type Metric map[string]interface{}
-
-type Batch struct {
-	Gauges      []Measurement `json:"gauges,omitempty"`
-	Counters    []Measurement `json:"counters,omitempty"`
-	MeasureTime int64         `json:"measure_time"`
-	Source      string        `json:"source"`
-}
-
-func (self *LibratoClient) PostMetrics(batch Batch) (err error) {
-	var (
-		js   []byte
-		req  *http.Request
-		resp *http.Response
-	)
-
-	if len(batch.Counters) == 0 && len(batch.Gauges) == 0 {
-		return nil
-	}
-
-	if js, err = json.Marshal(batch); err != nil {
-		return
-	}
-
-	if req, err = http.NewRequest("POST", MetricsPostUrl, bytes.NewBuffer(js)); err != nil {
-		return
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-	req.SetBasicAuth(self.Email, self.Token)
-
-	if resp, err = http.DefaultClient.Do(req); err != nil {
-		return
-	}
-
-	if resp.StatusCode != http.StatusOK {
-		var body []byte
-		if body, err = ioutil.ReadAll(resp.Body); err != nil {
-			body = []byte(fmt.Sprintf("(could not fetch response body for error: %s)", err))
-		}
-		err = fmt.Errorf("Unable to post to Librato: %d %s %s", resp.StatusCode, resp.Status, string(body))
-	}
-	return
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/librato/librato.go b/vendor/github.com/rcrowley/go-metrics/librato/librato.go
deleted file mode 100644
index d7c0574684..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/librato/librato.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package librato
-
-import (
-	"fmt"
-	"log"
-	"math"
-	"regexp"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// a regexp for extracting the unit from time.Duration.String
-var unitRegexp = regexp.MustCompile("[^\\d]+$")
-
-// a helper that turns a time.Duration into librato display attributes for timer metrics
-func translateTimerAttributes(d time.Duration) (attrs map[string]interface{}) {
-	attrs = make(map[string]interface{})
-	attrs[DisplayTransform] = fmt.Sprintf("x/%d", int64(d))
-	attrs[DisplayUnitsShort] = string(unitRegexp.Find([]byte(d.String())))
-	return
-}
-
-type Reporter struct {
-	Email, Token    string
-	Namespace       string
-	Source          string
-	Interval        time.Duration
-	Registry        metrics.Registry
-	Percentiles     []float64              // percentiles to report on histogram metrics
-	TimerAttributes map[string]interface{} // units in which timers will be displayed
-	intervalSec     int64
-}
-
-func NewReporter(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) *Reporter {
-	return &Reporter{e, t, "", s, d, r, p, translateTimerAttributes(u), int64(d / time.Second)}
-}
-
-func Librato(r metrics.Registry, d time.Duration, e string, t string, s string, p []float64, u time.Duration) {
-	NewReporter(r, d, e, t, s, p, u).Run()
-}
-
-func (self *Reporter) Run() {
-	log.Printf("WARNING: This client has been DEPRECATED! It has been moved to https://github.com/mihasya/go-metrics-librato and will be removed from rcrowley/go-metrics on August 5th 2015")
-	ticker := time.Tick(self.Interval)
-	metricsApi := &LibratoClient{self.Email, self.Token}
-	for now := range ticker {
-		var metrics Batch
-		var err error
-		if metrics, err = self.BuildRequest(now, self.Registry); err != nil {
-			log.Printf("ERROR constructing librato request body %s", err)
-			continue
-		}
-		if err := metricsApi.PostMetrics(metrics); err != nil {
-			log.Printf("ERROR sending metrics to librato %s", err)
-			continue
-		}
-	}
-}
-
-// calculate sum of squares from data provided by metrics.Histogram
-// see http://en.wikipedia.org/wiki/Standard_deviation#Rapid_calculation_methods
-func sumSquares(s metrics.Sample) float64 {
-	count := float64(s.Count())
-	sumSquared := math.Pow(count*s.Mean(), 2)
-	sumSquares := math.Pow(count*s.StdDev(), 2) + sumSquared/count
-	if math.IsNaN(sumSquares) {
-		return 0.0
-	}
-	return sumSquares
-}
-func sumSquaresTimer(t metrics.Timer) float64 {
-	count := float64(t.Count())
-	sumSquared := math.Pow(count*t.Mean(), 2)
-	sumSquares := math.Pow(count*t.StdDev(), 2) + sumSquared/count
-	if math.IsNaN(sumSquares) {
-		return 0.0
-	}
-	return sumSquares
-}
-
-func (self *Reporter) BuildRequest(now time.Time, r metrics.Registry) (snapshot Batch, err error) {
-	snapshot = Batch{
-		// coerce timestamps to a stepping fn so that they line up in Librato graphs
-		MeasureTime: (now.Unix() / self.intervalSec) * self.intervalSec,
-		Source:      self.Source,
-	}
-	snapshot.Gauges = make([]Measurement, 0)
-	snapshot.Counters = make([]Measurement, 0)
-	histogramGaugeCount := 1 + len(self.Percentiles)
-	r.Each(func(name string, metric interface{}) {
-		if self.Namespace != "" {
-			name = fmt.Sprintf("%s.%s", self.Namespace, name)
-		}
-		measurement := Measurement{}
-		measurement[Period] = self.Interval.Seconds()
-		switch m := metric.(type) {
-		case metrics.Counter:
-			if m.Count() > 0 {
-				measurement[Name] = fmt.Sprintf("%s.%s", name, "count")
-				measurement[Value] = float64(m.Count())
-				measurement[Attributes] = map[string]interface{}{
-					DisplayUnitsLong:  Operations,
-					DisplayUnitsShort: OperationsShort,
-					DisplayMin:        "0",
-				}
-				snapshot.Counters = append(snapshot.Counters, measurement)
-			}
-		case metrics.Gauge:
-			measurement[Name] = name
-			measurement[Value] = float64(m.Value())
-			snapshot.Gauges = append(snapshot.Gauges, measurement)
-		case metrics.GaugeFloat64:
-			measurement[Name] = name
-			measurement[Value] = float64(m.Value())
-			snapshot.Gauges = append(snapshot.Gauges, measurement)
-		case metrics.Histogram:
-			if m.Count() > 0 {
-				gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
-				s := m.Sample()
-				measurement[Name] = fmt.Sprintf("%s.%s", name, "hist")
-				measurement[Count] = uint64(s.Count())
-				measurement[Max] = float64(s.Max())
-				measurement[Min] = float64(s.Min())
-				measurement[Sum] = float64(s.Sum())
-				measurement[SumSquares] = sumSquares(s)
-				gauges[0] = measurement
-				for i, p := range self.Percentiles {
-					gauges[i+1] = Measurement{
-						Name:   fmt.Sprintf("%s.%.2f", measurement[Name], p),
-						Value:  s.Percentile(p),
-						Period: measurement[Period],
-					}
-				}
-				snapshot.Gauges = append(snapshot.Gauges, gauges...)
-			}
-		case metrics.Meter:
-			measurement[Name] = name
-			measurement[Value] = float64(m.Count())
-			snapshot.Counters = append(snapshot.Counters, measurement)
-			snapshot.Gauges = append(snapshot.Gauges,
-				Measurement{
-					Name:   fmt.Sprintf("%s.%s", name, "1min"),
-					Value:  m.Rate1(),
-					Period: int64(self.Interval.Seconds()),
-					Attributes: map[string]interface{}{
-						DisplayUnitsLong:  Operations,
-						DisplayUnitsShort: OperationsShort,
-						DisplayMin:        "0",
-					},
-				},
-				Measurement{
-					Name:   fmt.Sprintf("%s.%s", name, "5min"),
-					Value:  m.Rate5(),
-					Period: int64(self.Interval.Seconds()),
-					Attributes: map[string]interface{}{
-						DisplayUnitsLong:  Operations,
-						DisplayUnitsShort: OperationsShort,
-						DisplayMin:        "0",
-					},
-				},
-				Measurement{
-					Name:   fmt.Sprintf("%s.%s", name, "15min"),
-					Value:  m.Rate15(),
-					Period: int64(self.Interval.Seconds()),
-					Attributes: map[string]interface{}{
-						DisplayUnitsLong:  Operations,
-						DisplayUnitsShort: OperationsShort,
-						DisplayMin:        "0",
-					},
-				},
-			)
-		case metrics.Timer:
-			measurement[Name] = name
-			measurement[Value] = float64(m.Count())
-			snapshot.Counters = append(snapshot.Counters, measurement)
-			if m.Count() > 0 {
-				libratoName := fmt.Sprintf("%s.%s", name, "timer.mean")
-				gauges := make([]Measurement, histogramGaugeCount, histogramGaugeCount)
-				gauges[0] = Measurement{
-					Name:       libratoName,
-					Count:      uint64(m.Count()),
-					Sum:        m.Mean() * float64(m.Count()),
-					Max:        float64(m.Max()),
-					Min:        float64(m.Min()),
-					SumSquares: sumSquaresTimer(m),
-					Period:     int64(self.Interval.Seconds()),
-					Attributes: self.TimerAttributes,
-				}
-				for i, p := range self.Percentiles {
-					gauges[i+1] = Measurement{
-						Name:       fmt.Sprintf("%s.timer.%2.0f", name, p*100),
-						Value:      m.Percentile(p),
-						Period:     int64(self.Interval.Seconds()),
-						Attributes: self.TimerAttributes,
-					}
-				}
-				snapshot.Gauges = append(snapshot.Gauges, gauges...)
-				snapshot.Gauges = append(snapshot.Gauges,
-					Measurement{
-						Name:   fmt.Sprintf("%s.%s", name, "rate.1min"),
-						Value:  m.Rate1(),
-						Period: int64(self.Interval.Seconds()),
-						Attributes: map[string]interface{}{
-							DisplayUnitsLong:  Operations,
-							DisplayUnitsShort: OperationsShort,
-							DisplayMin:        "0",
-						},
-					},
-					Measurement{
-						Name:   fmt.Sprintf("%s.%s", name, "rate.5min"),
-						Value:  m.Rate5(),
-						Period: int64(self.Interval.Seconds()),
-						Attributes: map[string]interface{}{
-							DisplayUnitsLong:  Operations,
-							DisplayUnitsShort: OperationsShort,
-							DisplayMin:        "0",
-						},
-					},
-					Measurement{
-						Name:   fmt.Sprintf("%s.%s", name, "rate.15min"),
-						Value:  m.Rate15(),
-						Period: int64(self.Interval.Seconds()),
-						Attributes: map[string]interface{}{
-							DisplayUnitsLong:  Operations,
-							DisplayUnitsShort: OperationsShort,
-							DisplayMin:        "0",
-						},
-					},
-				)
-			}
-		}
-	})
-	return
-}
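
The deleted Librato reporter was started through the package-level Librato helper, matching the signature above; a sketch with placeholder credentials and intervals:

package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/rcrowley/go-metrics/librato"
)

func main() {
	// Blocks forever, reporting metrics.DefaultRegistry every 10 seconds.
	librato.Librato(
		metrics.DefaultRegistry,
		10*time.Second,        // reporting interval
		"user@example.com",    // Librato account email (placeholder)
		"api-token",           // Librato API token (placeholder)
		"my-host",             // measurement source (placeholder)
		[]float64{0.95, 0.99}, // percentiles reported for histograms/timers
		time.Millisecond,      // display unit for timer values
	)
}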
diff --git a/vendor/github.com/rcrowley/go-metrics/stathat/stathat.go b/vendor/github.com/rcrowley/go-metrics/stathat/stathat.go
deleted file mode 100644
index 0afcb48482..0000000000
--- a/vendor/github.com/rcrowley/go-metrics/stathat/stathat.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Metrics output to StatHat.
-package stathat
-
-import (
-	"github.com/rcrowley/go-metrics"
-	"github.com/stathat/go"
-	"log"
-	"time"
-)
-
-func Stathat(r metrics.Registry, d time.Duration, userkey string) {
-	for {
-		if err := sh(r, userkey); nil != err {
-			log.Println(err)
-		}
-		time.Sleep(d)
-	}
-}
-
-func sh(r metrics.Registry, userkey string) error {
-	r.Each(func(name string, i interface{}) {
-		switch metric := i.(type) {
-		case metrics.Counter:
-			stathat.PostEZCount(name, userkey, int(metric.Count()))
-		case metrics.Gauge:
-			stathat.PostEZValue(name, userkey, float64(metric.Value()))
-		case metrics.GaugeFloat64:
-			stathat.PostEZValue(name, userkey, float64(metric.Value()))
-		case metrics.Histogram:
-			h := metric.Snapshot()
-			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			stathat.PostEZCount(name+".count", userkey, int(h.Count()))
-			stathat.PostEZValue(name+".min", userkey, float64(h.Min()))
-			stathat.PostEZValue(name+".max", userkey, float64(h.Max()))
-			stathat.PostEZValue(name+".mean", userkey, float64(h.Mean()))
-			stathat.PostEZValue(name+".std-dev", userkey, float64(h.StdDev()))
-			stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
-			stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
-			stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
-			stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
-			stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
-		case metrics.Meter:
-			m := metric.Snapshot()
-			stathat.PostEZCount(name+".count", userkey, int(m.Count()))
-			stathat.PostEZValue(name+".one-minute", userkey, float64(m.Rate1()))
-			stathat.PostEZValue(name+".five-minute", userkey, float64(m.Rate5()))
-			stathat.PostEZValue(name+".fifteen-minute", userkey, float64(m.Rate15()))
-			stathat.PostEZValue(name+".mean", userkey, float64(m.RateMean()))
-		case metrics.Timer:
-			t := metric.Snapshot()
-			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
-			stathat.PostEZCount(name+".count", userkey, int(t.Count()))
-			stathat.PostEZValue(name+".min", userkey, float64(t.Min()))
-			stathat.PostEZValue(name+".max", userkey, float64(t.Max()))
-			stathat.PostEZValue(name+".mean", userkey, float64(t.Mean()))
-			stathat.PostEZValue(name+".std-dev", userkey, float64(t.StdDev()))
-			stathat.PostEZValue(name+".50-percentile", userkey, float64(ps[0]))
-			stathat.PostEZValue(name+".75-percentile", userkey, float64(ps[1]))
-			stathat.PostEZValue(name+".95-percentile", userkey, float64(ps[2]))
-			stathat.PostEZValue(name+".99-percentile", userkey, float64(ps[3]))
-			stathat.PostEZValue(name+".999-percentile", userkey, float64(ps[4]))
-			stathat.PostEZValue(name+".one-minute", userkey, float64(t.Rate1()))
-			stathat.PostEZValue(name+".five-minute", userkey, float64(t.Rate5()))
-			stathat.PostEZValue(name+".fifteen-minute", userkey, float64(t.Rate15()))
-			stathat.PostEZValue(name+".mean-rate", userkey, float64(t.RateMean()))
-		}
-	})
-	return nil
-}
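
The StatHat sink removed above follows the same blocking report loop; the commented-out call in metrics-example.go shows the intended usage, roughly (placeholder key, and metrics.DefaultRegistry assumed):

package main

import (
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/rcrowley/go-metrics/stathat"
)

func main() {
	// Posts every registered metric to StatHat every 10 seconds; loops forever.
	stathat.Stathat(metrics.DefaultRegistry, 10*time.Second, "example@example.com")
}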