diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000000..f2c2bc21118
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
rename to vendor/github.com/Sirupsen/logrus/LICENSE
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/LICENSE~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..f090cb42f37
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/LICENSE~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
similarity index 69%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
rename to vendor/github.com/Sirupsen/logrus/README.md
index 4be378476f4..f8302c373fb 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -1,4 +1,4 @@
-# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@@ -12,7 +12,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@@ -32,7 +32,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
@@ -75,17 +75,12 @@ package main
import (
"os"
log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
-
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
@@ -182,13 +177,16 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
```go
import (
log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
- log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development"))
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@@ -198,25 +196,39 @@ func init() {
}
}
```
-
+Note: the Syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
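+
+For a local syslog socket, the network and address arguments are simply left empty (a sketch mirroring the syslog hook README):
+
+```go
+hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+if err == nil {
+	log.AddHook(hook)
+}
+```
+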
| Hook | Description |
| ----- | ----------- |
-| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to a remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to an ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to an AMQP broker (like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
#### Level logging
@@ -272,7 +284,7 @@ init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
- log.SetFormatter(&logrus.JSONFormatter{})
+ log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
log.SetFormatter(&log.TextFormatter{})
@@ -294,15 +306,12 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
-* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net).
-
- ```go
- logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"})
- ```
Third party logging formatters:
-* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -315,7 +324,7 @@ type MyJSONFormatter struct {
log.SetFormatter(new(MyJSONFormatter))
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
@@ -351,5 +360,43 @@ Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+| [Logrus Mate](https://github.com/gogap/logrus_mate) | Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hooks, and formatter from a config file, generating loggers with different configurations for different environments. |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which simply add the `test` hook
+* a test logger (`test.NewNullLogger`) that only records log messages (and does not output any):
+
+```go
+logger, hook := test.NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
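+
+The decorator form attaches the same `test` hook to a logger you already have (a sketch; `test.NewLocal` is from the `hooks/test` package and the `assert` helpers are as above):
+
+```go
+logger := logrus.New()
+hook := test.NewLocal(logger)
+logger.Warn("careful")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.WarnLevel, hook.LastEntry().Level)
+```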
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
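+
+Handlers run in the order they were registered, and `logrus.Exit(code)` can also be called directly to run them before terminating, without logging a fatal message first:
+
+```
+logrus.RegisterExitHandler(handler)
+logrus.Exit(0) // runs all registered handlers, then calls os.Exit(0)
+```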
diff --git a/vendor/github.com/Sirupsen/logrus/README.md~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/README.md~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..8c01870152b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/README.md~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,355 @@
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0), the core API is unlikely to change much but please version
+control your Logrus to make sure you aren't fetching latest `master` on every
+build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
+time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
+time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
+time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
+time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+ "os"
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+)
+
+func init() {
+ // Log as JSON instead of the default ASCII formatter.
+ log.SetFormatter(&log.JSONFormatter{})
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(&logrus_airbrake.AirbrakeHook{})
+
+ // Output to stderr instead of stdout, could also be a file.
+ log.SetOutput(os.Stderr)
+
+ // Only log the warning severity or above.
+ log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 122,
+ }).Warn("The group's number increased tremendously!")
+
+ log.WithFields(log.Fields{
+ "omg": true,
+ "number": 100,
+ }).Fatal("The ice breaks!")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+ "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+ // The API for setting attributes is a little different than the package level
+ // exported logger. See Godoc.
+ log.Out = os.Stderr
+
+ log.WithFields(logrus.Fields{
+ "animal": "walrus",
+ "size": 10,
+ }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+ "event": event,
+ "topic": topic,
+ "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+```go
+// Not the real implementation of the Airbrake hook. Just a simple sample.
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+ log.AddHook(new(AirbrakeHook))
+}
+
+type AirbrakeHook struct{}
+
+// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
+// the fields for the entry. See the Fields section of the README.
+func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
+ err := airbrake.Notify(entry.Data["error"].(error))
+ if err != nil {
+ log.WithFields(log.Fields{
+ "source": "airbrake",
+ "endpoint": airbrake.Endpoint,
+ }).Info("Failed to send error to Airbrake")
+ }
+
+ return nil
+}
+
+// `Levels()` returns a slice of `Levels` the hook is fired for.
+func (hook *AirbrakeHook) Levels() []log.Level {
+ return []log.Level{
+ log.ErrorLevel,
+ log.FatalLevel,
+ log.PanicLevel,
+ }
+}
+```
+
+Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus/hooks/airbrake"
+ "github.com/Sirupsen/logrus/hooks/syslog"
+ "log/syslog"
+)
+
+func init() {
+ log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+ if err != nil {
+ log.Error("Unable to connect to local syslog daemon")
+ } else {
+ log.AddHook(hook)
+ }
+}
+```
+
+* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
+ Send errors to an exception tracking service compatible with the Airbrake API.
+ Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+
+* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
+ Send errors to the Papertrail hosted logging service via UDP.
+
+* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
+ Send errors to remote syslog server.
+ Uses standard library `log/syslog` behind the scenes.
+
+* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
+ Send errors to a channel in hipchat.
+
+* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
+ Send logs to Loggly (https://www.loggly.com/)
+
+* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
+ Hook for Slack chat.
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+ the `AddFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+ log "github.com/Sirupsen/logrus"
+)
+
+init() {
+ // do something here to set environment depending on an environment variable
+ // or command-line flag
+ if Environment == "production" {
+ log.SetFormatter(logrus.JSONFormatter)
+ } else {
+ // The TextFormatter is default, you don't actually have to do this.
+ log.SetFormatter(logrus.TextFormatter)
+ }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+ without colors.
+ * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+ field to `true`. To force no colored output even if there is a TTY set the
+ `DisableColors` field to `true`
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ // Note this doesn't include Time, Level and Message which are available on
+ // the Entry. Consult `godoc` on information about those fields or read the
+ // source of the official loggers.
+ serialized, err := json.Marshal(entry.Data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+
+[godoc]: https://godoc.org/github.com/Sirupsen/logrus
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 00000000000..b4c9e84754a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
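+
+// A minimal usage sketch (`db` is a hypothetical resource handle):
+//
+//	logrus.RegisterExitHandler(func() { db.Close() })
+//	logrus.Fatal("unrecoverable error") // runs handlers, then os.Exit(1)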
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 00000000000..dddd5f877bf
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 00000000000..54bfc57d7f8
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,264 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "time"
+)
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+ Logger *Logger
+
+ // Contains all the fields set by the user.
+ Data Fields
+
+ // Time at which the log entry was created
+ Time time.Time
+
+ // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+ Level Level
+
+ // Message passed to Debug, Info, Warn, Error, Fatal or Panic
+ Message string
+}
+
+func NewEntry(logger *Logger) *Entry {
+ return &Entry{
+ Logger: logger,
+ // Default is three fields, give a little extra room
+ Data: make(Fields, 5),
+ }
+}
+
+// Returns a reader for the entry, which is a proxy to the formatter.
+func (entry *Entry) Reader() (*bytes.Buffer, error) {
+ serialized, err := entry.Logger.Formatter.Format(entry)
+ return bytes.NewBuffer(serialized), err
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+ reader, err := entry.Reader()
+ if err != nil {
+ return "", err
+ }
+
+ return reader.String(), err
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+ return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
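+	// Pre-size the merged map so copying entry.Data plus the new fields
+	// does not re-allocate (see CHANGELOG 0.10.0: "avoid re-allocations on WithFields").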
+ data := make(Fields, len(entry.Data)+len(fields))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ for k, v := range fields {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ entry.Time = time.Now()
+ entry.Level = level
+ entry.Message = msg
+
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ reader, err := entry.Reader()
+ if err != nil {
+ entry.Logger.mu.Lock()
+ fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+ entry.Logger.mu.Unlock()
+ }
+
+ entry.Logger.mu.Lock()
+ defer entry.Logger.mu.Unlock()
+
+ _, err = io.Copy(entry.Logger.Out, reader)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+
+ // To avoid Entry#log() returning a value that only would make sense for
+ // panic() to use in Entry#Panic(), we avoid the allocation by checking
+ // directly here.
+ if level <= PanicLevel {
+ panic(&entry)
+ }
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.log(DebugLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+ entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.log(InfoLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.log(WarnLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.log(ErrorLevel, fmt.Sprint(args...))
+ }
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.log(FatalLevel, fmt.Sprint(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.log(PanicLevel, fmt.Sprint(args...))
+ }
+ panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+ entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+ entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(fmt.Sprintf(format, args...))
+ }
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(fmt.Sprintf(format, args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(fmt.Sprintf(format, args...))
+ }
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+ if entry.Logger.Level >= DebugLevel {
+ entry.Debug(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+ if entry.Logger.Level >= InfoLevel {
+ entry.Info(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+ entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+ if entry.Logger.Level >= WarnLevel {
+ entry.Warn(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+ entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+ if entry.Logger.Level >= ErrorLevel {
+ entry.Error(entry.sprintlnn(args...))
+ }
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+ if entry.Logger.Level >= FatalLevel {
+ entry.Fatal(entry.sprintlnn(args...))
+ }
+ Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+ if entry.Logger.Level >= PanicLevel {
+ entry.Panic(entry.sprintlnn(args...))
+ }
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
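+// For example, sprintlnn("answer:", 42) returns "answer: 42" with no trailing
+// newline, matching fmt.Sprintln's rule of always spacing operands.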
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+ msg := fmt.Sprintln(args...)
+ return msg[:len(msg)-1]
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go~a6943e7452c65be4e907893e3eefdaa186cee610
old mode 100644
new mode 100755
similarity index 99%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
rename to vendor/github.com/Sirupsen/logrus/entry.go~a6943e7452c65be4e907893e3eefdaa186cee610
index 699ea035cce..17fe6f707bc
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
+++ b/vendor/github.com/Sirupsen/logrus/entry.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -188,7 +188,6 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
- os.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
@@ -235,7 +234,6 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
- os.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
similarity index 95%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
rename to vendor/github.com/Sirupsen/logrus/exported.go
index a67e1b802d9..9a0120ac1dd 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -48,6 +48,11 @@ func AddHook(hook Hook) {
std.Hooks.Add(hook)
}
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
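+// A typical call site (sketch; `doWork` is hypothetical):
+//
+//	if err := doWork(); err != nil {
+//		WithError(err).Error("work failed") // adds err under the "error" key
+//	}
+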
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/exported.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..d0871244814
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/exported.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,182 @@
+package logrus
+
+import (
+ "io"
+)
+
+var (
+ // std is the name of the standard logger in stdlib `log`
+ std = New()
+)
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+ return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ std.Hooks.Add(hook)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+ return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+ return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+ std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+ std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+ std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+ std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+ std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+ std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+ std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+ std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+ std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+ std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+ std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+ std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+ std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+ std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+ std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+ std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+ std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+ std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+ std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+ std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+ std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+ std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+ std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+ std.Fatalln(args...)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
similarity index 85%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
rename to vendor/github.com/Sirupsen/logrus/formatter.go
index 104d689f187..b5fbe934d12 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -31,18 +31,15 @@ type Formatter interface {
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
- _, ok := data["time"]
- if ok {
- data["fields.time"] = data["time"]
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
}
- _, ok = data["msg"]
- if ok {
- data["fields.msg"] = data["msg"]
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
}
- _, ok = data["level"]
- if ok {
- data["fields.level"] = data["level"]
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
}
}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..038ce9fd297
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,44 @@
+package logrus
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+ Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+// logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll logged as:
+//
+// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+ _, ok := data["time"]
+ if ok {
+ data["fields.time"] = data["time"]
+ }
+
+ _, ok = data["msg"]
+ if ok {
+ data["fields.msg"] = data["msg"]
+ }
+
+ _, ok = data["level"]
+ if ok {
+ data["fields.level"] = data["level"]
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
rename to vendor/github.com/Sirupsen/logrus/hooks.go
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/hooks.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..0da2b3653f5
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+ Levels() []Level
+ Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type levelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks levelHooks) Add(hook Hook) {
+ for _, level := range hook.Levels() {
+ hooks[level] = append(hooks[level], hook)
+ }
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+ for _, hook := range hooks[level] {
+ if err := hook.Fire(entry); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go
rename to vendor/github.com/Sirupsen/logrus/json_formatter.go
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/json_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..b09227c2b5d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,26 @@
+package logrus
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+type JSONFormatter struct{}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+ data := make(Fields, len(entry.Data)+3)
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ prefixFieldClashes(data)
+ data["time"] = entry.Time.Format(time.RFC3339)
+ data["msg"] = entry.Message
+ data["level"] = entry.Level.String()
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
similarity index 92%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
rename to vendor/github.com/Sirupsen/logrus/logger.go
index e4974bfbe78..9052a8065cc 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -8,7 +8,7 @@ import (
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stdout`. You can also set this to
+ // file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
@@ -51,9 +51,9 @@ func New() *Logger {
}
}
-// Adds a field to the log entry, note that you it doesn't log until you call
+// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// Ff you want multiple fields, use `WithFields`.
+// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
@@ -64,6 +64,12 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ return NewEntry(logger).WithError(err)
+}
+
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
@@ -102,7 +108,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
@@ -149,7 +155,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
@@ -196,7 +202,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/logger.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..b392e547a7b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logger.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,161 @@
+package logrus
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+type Logger struct {
+ // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+ // file, or leave it default which is `os.Stdout`. You can also set this to
+ // something more adventurous, such as logging to Kafka.
+ Out io.Writer
+ // Hooks for the logger instance. These allow firing events based on logging
+ // levels and log entries. For example, to send errors to an error tracking
+ // service, log to StatsD or dump the core on fatal errors.
+ Hooks levelHooks
+ // All log entries pass through the formatter before logged to Out. The
+ // included formatters are `TextFormatter` and `JSONFormatter` for which
+ // TextFormatter is the default. In development (when a TTY is attached) it
+ // logs with colors, but to a file it wouldn't. You can easily implement your
+ // own that implements the `Formatter` interface, see the `README` or included
+ // formatters for examples.
+ Formatter Formatter
+ // The logging level the logger should log at. This is typically (and defaults
+ // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+ // logged. `logrus.Debug` is useful in a debug or verbose environment.
+ Level Level
+ // Used to sync writing to the log.
+ mu sync.Mutex
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+// var log = &Logger{
+// Out: os.Stderr,
+// Formatter: new(JSONFormatter),
+// Hooks: make(levelHooks),
+// Level: logrus.DebugLevel,
+// }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+ return &Logger{
+ Out: os.Stdout,
+ Formatter: new(TextFormatter),
+ Hooks: make(levelHooks),
+ Level: InfoLevel,
+ }
+}
+
+// Adds a field to the log entry, note that you it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Ff you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+ return NewEntry(logger).WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+ return NewEntry(logger).WithFields(fields)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+ NewEntry(logger).Debugf(format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+ NewEntry(logger).Infof(format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+ NewEntry(logger).Printf(format, args...)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+ NewEntry(logger).Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+ NewEntry(logger).Errorf(format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+ NewEntry(logger).Fatalf(format, args...)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+ NewEntry(logger).Panicf(format, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+ NewEntry(logger).Debug(args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+ NewEntry(logger).Info(args...)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+ NewEntry(logger).Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+ NewEntry(logger).Error(args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+ NewEntry(logger).Fatal(args...)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+ NewEntry(logger).Panic(args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+ NewEntry(logger).Debugln(args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+ NewEntry(logger).Infoln(args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+ NewEntry(logger).Println(args...)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+ NewEntry(logger).Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+ NewEntry(logger).Errorln(args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+ NewEntry(logger).Fatalln(args...)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+ NewEntry(logger).Panicln(args...)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 00000000000..e596691116d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+ switch level {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warning"
+ case ErrorLevel:
+ return "error"
+ case FatalLevel:
+ return "fatal"
+ case PanicLevel:
+ return "panic"
+ }
+
+ return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+ switch strings.ToLower(lvl) {
+ case "panic":
+ return PanicLevel, nil
+ case "fatal":
+ return FatalLevel, nil
+ case "error":
+ return ErrorLevel, nil
+ case "warn", "warning":
+ return WarnLevel, nil
+ case "info":
+ return InfoLevel, nil
+ case "debug":
+ return DebugLevel, nil
+ }
+
+ var l Level
+ return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
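A sketch of wiring `ParseLevel` to a user-supplied string; the fallback-to-Info policy here is illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

func main() {
	// ParseLevel lower-cases its input, so "WARN" and "warn" both parse.
	level, err := logrus.ParseLevel("WARN")
	if err != nil {
		level = logrus.InfoLevel // illustrative fallback policy
	}
	log := logrus.New()
	log.Level = level
	fmt.Println("logging at", level) // prints "logging at warning"
}
```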
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+ // PanicLevel level, highest level of severity. Logs and then calls panic with the
+ // message passed to Debug, Info, ...
+ PanicLevel Level = iota
+ // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+ // logging level is set to Panic.
+ FatalLevel
+ // ErrorLevel level. Logs. Used for errors that should definitely be noted.
+ // Commonly used for hooks to send errors to an error tracking service.
+ ErrorLevel
+ // WarnLevel level. Non-critical entries that deserve eyes.
+ WarnLevel
+ // InfoLevel level. General operational entries about what's going on inside the
+ // application.
+ InfoLevel
+ // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+ DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+ Print(...interface{})
+ Printf(string, ...interface{})
+ Println(...interface{})
+
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Fatalln(...interface{})
+
+ Panic(...interface{})
+ Panicf(string, ...interface{})
+ Panicln(...interface{})
+}
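For example, a library can accept `StdLogger` so callers may pass either a stdlib `*log.Logger` or a logrus logger; the retry helper below is purely illustrative:

```go
package retry

import "github.com/Sirupsen/logrus"

// Do retries fn up to attempts times, reporting failures through any
// StdLogger (stdlib *log.Logger, *logrus.Logger, or *logrus.Entry).
func Do(attempts int, logger logrus.StdLogger, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		logger.Printf("attempt %d failed: %v", i+1, err)
	}
	return err
}
```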
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
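A sketch of why `FieldLogger` is useful: a helper can accept request-scoped loggers without caring whether it receives a `*Logger` or an `*Entry` (`handleRequest` and its field names are illustrative):

```go
package main

import "github.com/Sirupsen/logrus"

// handleRequest accepts any FieldLogger, so both logrus.New() (a
// *Logger) and an *Entry returned by WithField satisfy it.
func handleRequest(log logrus.FieldLogger, id string) {
	log.WithField("request_id", id).Info("handling request")
}

func main() {
	base := logrus.New()
	handleRequest(base, "42")                               // *Logger
	handleRequest(base.WithField("component", "api"), "43") // *Entry
}
```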
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go~a6943e7452c65be4e907893e3eefdaa186cee610
old mode 100644
new mode 100755
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
rename to vendor/github.com/Sirupsen/logrus/logrus.go~a6943e7452c65be4e907893e3eefdaa186cee610
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go
rename to vendor/github.com/Sirupsen/logrus/terminal_bsd.go
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
new file mode 100755
index 00000000000..8fe02a4aec1
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
old mode 100644
new mode 100755
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go
rename to vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go
rename to vendor/github.com/Sirupsen/logrus/terminal_linux.go
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/terminal_linux.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..a2c0b40db61
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,12 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 00000000000..b343b3a3755
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,21 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var termios Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go~a6943e7452c65be4e907893e3eefdaa186cee610
old mode 100644
new mode 100755
similarity index 91%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
rename to vendor/github.com/Sirupsen/logrus/terminal_notwindows.go~a6943e7452c65be4e907893e3eefdaa186cee610
index b8bebc13eea..80edd32377b
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build linux darwin freebsd openbsd
+// +build linux,!appengine darwin freebsd openbsd
package logrus
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go
old mode 100644
new mode 100755
similarity index 99%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go
rename to vendor/github.com/Sirupsen/logrus/terminal_openbsd.go
index af609a53d64..d238bfa0b48
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go
@@ -1,3 +1,4 @@
+
package logrus
import "syscall"
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000000..3e70bf7bf09
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 00000000000..0146845d16c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package logrus
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+ fd := syscall.Stderr
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go~a6943e7452c65be4e907893e3eefdaa186cee610
old mode 100644
new mode 100755
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go
rename to vendor/github.com/Sirupsen/logrus/terminal_windows.go~a6943e7452c65be4e907893e3eefdaa186cee610
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
similarity index 84%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
rename to vendor/github.com/Sirupsen/logrus/text_formatter.go
index 2e6fe1bdd18..6afd0e031c9 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -73,17 +73,20 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
- if f.TimestampFormat == "" {
- f.TimestampFormat = DefaultTimestampFormat
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
}
if isColored {
- f.printColored(b, entry, keys)
+ f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat))
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
- f.appendKeyValue(b, "msg", entry.Message)
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
@@ -93,7 +96,7 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
@@ -111,11 +114,11 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message)
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
}
}
@@ -125,10 +128,10 @@ func needsQuoting(text string) bool {
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
- return false
+ return true
}
}
- return true
+ return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
@@ -138,14 +141,14 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
switch value := value.(type) {
case string:
- if needsQuoting(value) {
+ if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
- if needsQuoting(errmsg) {
+ if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610 b/vendor/github.com/Sirupsen/logrus/text_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
new file mode 100755
index 00000000000..78e78893568
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go~a6943e7452c65be4e907893e3eefdaa186cee610
@@ -0,0 +1,124 @@
+package logrus
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+)
+
+const (
+ nocolor = 0
+ red = 31
+ green = 32
+ yellow = 33
+ blue = 34
+)
+
+var (
+ baseTimestamp time.Time
+ isTerminal bool
+ noQuoteNeeded *regexp.Regexp
+)
+
+func init() {
+ baseTimestamp = time.Now()
+ isTerminal = IsTerminal()
+}
+
+func miniTS() int {
+ return int(time.Since(baseTimestamp) / time.Second)
+}
+
+type TextFormatter struct {
+ // Set to true to bypass checking for a TTY before outputting colors.
+ ForceColors bool
+ DisableColors bool
+ // Set to true to disable timestamp logging (useful when the output
+ // is redirected to a logging system already adding a timestamp)
+ DisableTimestamp bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+
+ var keys []string
+ for k := range entry.Data {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ b := &bytes.Buffer{}
+
+ prefixFieldClashes(entry.Data)
+
+ isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+
+ if isColored {
+ printColored(b, entry, keys)
+ } else {
+ if !f.DisableTimestamp {
+ f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ }
+ f.appendKeyValue(b, "level", entry.Level.String())
+ f.appendKeyValue(b, "msg", entry.Message)
+ for _, key := range keys {
+ f.appendKeyValue(b, key, entry.Data[key])
+ }
+ }
+
+ b.WriteByte('\n')
+ return b.Bytes(), nil
+}
+
+func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+ var levelColor int
+ switch entry.Level {
+ case WarnLevel:
+ levelColor = yellow
+ case ErrorLevel, FatalLevel, PanicLevel:
+ levelColor = red
+ default:
+ levelColor = blue
+ }
+
+ levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ for _, k := range keys {
+ v := entry.Data[k]
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ }
+}
+
+func needsQuoting(text string) bool {
+ for _, ch := range text {
+ if !((ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9') ||
+ ch == '-' || ch == '.') {
+ return false
+ }
+ }
+ return true
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
+ switch value.(type) {
+ case string:
+ if needsQuoting(value.(string)) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ case error:
+ if needsQuoting(value.(error).Error()) {
+ fmt.Fprintf(b, "%v=%s ", key, value)
+ } else {
+ fmt.Fprintf(b, "%v=%q ", key, value)
+ }
+ default:
+ fmt.Fprintf(b, "%v=%v ", key, value)
+ }
+}
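For contrast with the formatter code above, a sketch of configuring the current `TextFormatter` (`ForceColors` is one of the options present in both versions):

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	// Emit colored output even when stdout is not a TTY.
	log.Formatter = &logrus.TextFormatter{ForceColors: true}
	log.WithField("key", "value with spaces").Info("formatted entry")
}
```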
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 00000000000..f74d2aa5fc6
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+ switch level {
+ case DebugLevel:
+ printFunc = logger.Debug
+ case InfoLevel:
+ printFunc = logger.Info
+ case WarnLevel:
+ printFunc = logger.Warn
+ case ErrorLevel:
+ printFunc = logger.Error
+ case FatalLevel:
+ printFunc = logger.Fatal
+ case PanicLevel:
+ printFunc = logger.Panic
+ default:
+ printFunc = logger.Print
+ }
+
+ go logger.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ logger.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
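The `Writer`/`WriterLevel` pipe above makes it easy to route stdlib log output through logrus; a minimal sketch:

```go
package main

import (
	"log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// Each line written to the pipe becomes one logrus entry at Warn.
	w := logger.WriterLevel(logrus.WarnLevel)
	defer w.Close()

	log.SetOutput(w)
	log.SetFlags(0) // logrus adds its own timestamp
	log.Println("legacy message, now structured")
}
```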
diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE
new file mode 100644
index 00000000000..8f3fee627a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2013-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE
new file mode 100644
index 00000000000..8a37c1c7bc4
--- /dev/null
+++ b/vendor/github.com/docker/docker/NOTICE
@@ -0,0 +1,19 @@
+Docker
+Copyright 2012-2016 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+This product contains software (https://github.com/kr/pty) developed
+by Keith Rarick, licensed under the MIT License.
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go
new file mode 100644
index 00000000000..266df1e5374
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts.go
@@ -0,0 +1,151 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+var (
+ // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
+ DefaultHTTPPort = 2375 // Default HTTP Port
+ // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
+ DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
+ // DefaultUnixSocket Path for the unix socket.
+ // Docker daemon by default always listens on the default unix socket
+ DefaultUnixSocket = "/var/run/docker.sock"
+ // DefaultTCPHost constant defines the default host string used by docker on Windows
+ DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
+ // DefaultTLSHost constant defines the default host string used by docker for TLS sockets
+ DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
+ // DefaultNamedPipe defines the default named pipe used by docker on Windows
+ DefaultNamedPipe = `//./pipe/docker_engine`
+)
+
+// ValidateHost validates that the specified string is a valid host and returns it.
+func ValidateHost(val string) (string, error) {
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ // Note: unlike most flag validators, we don't return the mutated value here
+ // we need to know what the user entered later (using ParseHost) to adjust for tls
+ return val, nil
+}
+
+// ParseHost and set defaults for a Daemon host string
+func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
+ return host, nil
+}
+
+// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
+func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
+ addrParts = []string{"tcp", addrParts[0]}
+ }
+
+ switch addrParts[0] {
+ case "tcp":
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
+ case "unix":
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
+ case "fd":
+ return addr, nil
+ default:
+ return "", fmt.Errorf("Invalid bind address format: %s", addr)
+ }
+}
+
+// parseSimpleProtoAddr parses and validates that the specified address is a valid
+// socket address for simple protocols like unix and npipe. It returns a formatted
+// socket address, either using the address parsed from addr, or the contents of
+// defaultAddr if addr is a blank string.
+func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
+ if strings.Contains(addr, "://") {
+ return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
+ }
+ if addr == "" {
+ addr = defaultAddr
+ }
+ return fmt.Sprintf("%s://%s", proto, addr), nil
+}
+
+// ParseTCPAddr parses and validates that the specified address is a valid TCP
+// address. It returns a formatted TCP address, either using the address parsed
+// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
+// tryAddr is expected to have already been Trim()'d
+// defaultAddr must be in the full `tcp://host:port` form
+func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ if tryAddr == "" || tryAddr == "tcp://" {
+ return defaultAddr, nil
+ }
+ addr := strings.TrimPrefix(tryAddr, "tcp://")
+ if strings.Contains(addr, "://") || addr == "" {
+ return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
+ }
+
+ defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
+ defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
+ if err != nil {
+ return "", err
+ }
+ // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
+ // not 1.4. See https://github.com/golang/go/issues/12200 and
+ // https://github.com/golang/go/issues/6530.
+ if strings.HasSuffix(addr, "]:") {
+ addr += defaultPort
+ }
+
+ u, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return "", err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
+ if err != nil {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ if host == "" {
+ host = defaultHost
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ p, err := strconv.Atoi(port)
+ if err != nil && p == 0 {
+ return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
+ }
+
+ return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
+}
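A sketch of how these parsers normalize host strings; the expected outputs assume a non-Windows build, where DefaultHTTPHost is "localhost" and DefaultHost is the unix socket:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Bare host:port strings are assumed to be tcp; missing host or
	// port pieces are filled in from the defaults.
	for _, in := range []string{"", "tcp://", "0.0.0.0", ":2376", "unix://"} {
		host, err := opts.ParseHost(false, in)
		fmt.Printf("%-10q -> %q err=%v\n", in, host, err)
	}
}
```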
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go
similarity index 52%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
rename to vendor/github.com/docker/docker/opts/hosts_unix.go
index a29335e605a..611407a9d94 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go
+++ b/vendor/github.com/docker/docker/opts/hosts_unix.go
@@ -4,4 +4,5 @@ package opts
import "fmt"
+// DefaultHost constant defines the default host string used by docker on other hosts than Windows
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go
new file mode 100644
index 00000000000..7c239e00f1e
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/hosts_windows.go
@@ -0,0 +1,6 @@
+// +build windows
+
+package opts
+
+// DefaultHost constant defines the default host string used by docker on Windows
+var DefaultHost = "npipe://" + DefaultNamedPipe
diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go
new file mode 100644
index 00000000000..c7b0dc99473
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/ip.go
@@ -0,0 +1,42 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+)
+
+// IPOpt holds an IP. It is used to store values from CLI flags.
+type IPOpt struct {
+ *net.IP
+}
+
+// NewIPOpt creates a new IPOpt from a reference net.IP and a
+// string representation of an IP. If the string is not a valid
+// IP, it will fall back to the specified reference.
+func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
+ o := &IPOpt{
+ IP: ref,
+ }
+ o.Set(defaultVal)
+ return o
+}
+
+// Set sets an IPv4 or IPv6 address from a given string. If the given
+// string is not parseable as an IP address it returns an error.
+func (o *IPOpt) Set(val string) error {
+ ip := net.ParseIP(val)
+ if ip == nil {
+ return fmt.Errorf("%s is not an ip address", val)
+ }
+ *o.IP = ip
+ return nil
+}
+
+// String returns the IP address stored in the IPOpt. If stored IP is a
+// nil pointer, it returns an empty string.
+func (o *IPOpt) String() string {
+ if *o.IP == nil {
+ return ""
+ }
+ return o.IP.String()
+}
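Because `IPOpt` implements `Set` and `String`, it satisfies `flag.Value` and can back a CLI flag directly; the flag name and default below are illustrative:

```go
package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	// NewIPOpt applies the default value immediately via Set.
	flag.Var(opts.NewIPOpt(&ip, "127.0.0.1"), "ip", "host IP to bind to")
	flag.Parse()
	fmt.Println("binding to", ip) // defaults to 127.0.0.1
}
```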
diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go
new file mode 100644
index 00000000000..1b9d6b294a8
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts.go
@@ -0,0 +1,321 @@
+package opts
+
+import (
+ "fmt"
+ "net"
+ "regexp"
+ "strings"
+
+ "github.com/docker/engine-api/types/filters"
+)
+
+var (
+ alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
+ domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
+)
+
+// ListOpts holds a list of values and a validation function.
+type ListOpts struct {
+ values *[]string
+ validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+ var values []string
+ return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+ return &ListOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+func (opts *ListOpts) String() string {
+ return fmt.Sprintf("%v", []string((*opts.values)))
+}
+
+// Set validates the input value with the configured validator, if any,
+// and appends it to the internal slice.
+func (opts *ListOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ (*opts.values) = append((*opts.values), value)
+ return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+ for i, k := range *opts.values {
+ if k == key {
+ (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+ return
+ }
+ }
+}
+
+// GetMap returns the content of values in a map in order to avoid
+// duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+ ret := make(map[string]struct{})
+ for _, k := range *opts.values {
+ ret[k] = struct{}{}
+ }
+ return ret
+}
+
+// GetAll returns the values of slice.
+func (opts *ListOpts) GetAll() []string {
+ return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+ v := *opts.values
+ if v == nil {
+ return make([]string, 0)
+ }
+ return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+ for _, k := range *opts.values {
+ if k == key {
+ return true
+ }
+ }
+ return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+ return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+ return "list"
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+ Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+ name string
+ ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+ return &NamedListOpts{
+ name: name,
+ ListOpts: *NewListOptsRef(values, validator),
+ }
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+ return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+ values map[string]string
+ validator ValidatorFctType
+}
+
+// Set validates the input value with the configured validator, if any,
+// and adds it to the internal map, splitting on the first '='.
+func (opts *MapOpts) Set(value string) error {
+ if opts.validator != nil {
+ v, err := opts.validator(value)
+ if err != nil {
+ return err
+ }
+ value = v
+ }
+ vals := strings.SplitN(value, "=", 2)
+ if len(vals) == 1 {
+ (opts.values)[vals[0]] = ""
+ } else {
+ (opts.values)[vals[0]] = vals[1]
+ }
+ return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+ return opts.values
+}
+
+func (opts *MapOpts) String() string {
+ return fmt.Sprintf("%v", map[string]string((opts.values)))
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+ return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+ if values == nil {
+ values = make(map[string]string)
+ }
+ return &MapOpts{
+ values: values,
+ validator: validator,
+ }
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+ name string
+ MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+ return &NamedMapOpts{
+ name: name,
+ MapOpts: *NewMapOpts(values, validator),
+ }
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+ return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+ var ip = net.ParseIP(strings.TrimSpace(val))
+ if ip != nil {
+ return ip.String(), nil
+ }
+ return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates domain for resolvconf search configuration.
+// A zero length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+ if val = strings.Trim(val, " "); val == "." {
+ return val, nil
+ }
+ return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+ if alphaRegexp.FindString(val) == "" {
+ return "", fmt.Errorf("%s is not a valid domain", val)
+ }
+ ns := domainRegexp.FindSubmatch([]byte(val))
+ if len(ns) > 0 && len(ns[1]) < 255 {
+ return string(ns[1]), nil
+ }
+ return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form key=value.
+func ValidateLabel(val string) (string, error) {
+ if strings.Count(val, "=") < 1 {
+ return "", fmt.Errorf("bad attribute format: %s", val)
+ }
+ return val, nil
+}
+
+// ValidateSysctl validates a sysctl and returns it.
+func ValidateSysctl(val string) (string, error) {
+ validSysctlMap := map[string]bool{
+ "kernel.msgmax": true,
+ "kernel.msgmnb": true,
+ "kernel.msgmni": true,
+ "kernel.sem": true,
+ "kernel.shmall": true,
+ "kernel.shmmax": true,
+ "kernel.shmmni": true,
+ "kernel.shm_rmid_forced": true,
+ }
+ validSysctlPrefixes := []string{
+ "net.",
+ "fs.mqueue.",
+ }
+ arr := strings.Split(val, "=")
+ if len(arr) < 2 {
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+ }
+ if validSysctlMap[arr[0]] {
+ return val, nil
+ }
+
+ for _, vp := range validSysctlPrefixes {
+ if strings.HasPrefix(arr[0], vp) {
+ return val, nil
+ }
+ }
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+}
+
+// FilterOpt is a flag type for validating filters
+type FilterOpt struct {
+ filter filters.Args
+}
+
+// NewFilterOpt returns a new FilterOpt
+func NewFilterOpt() FilterOpt {
+ return FilterOpt{filter: filters.NewArgs()}
+}
+
+func (o *FilterOpt) String() string {
+ repr, err := filters.ToParam(o.filter)
+ if err != nil {
+ return "invalid filters"
+ }
+ return repr
+}
+
+// Set sets the value of the opt by parsing the command line value
+func (o *FilterOpt) Set(value string) error {
+ var err error
+ o.filter, err = filters.ParseFlag(value, o.filter)
+ return err
+}
+
+// Type returns the option type
+func (o *FilterOpt) Type() string {
+ return "filter"
+}
+
+// Value returns the value of this option
+func (o *FilterOpt) Value() filters.Args {
+ return o.filter
+}
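A sketch of the validator plumbing: `ListOpts` runs its `ValidatorFctType` on every `Set`, here paired with the package's own `ValidateIPAddress`:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	dns := opts.NewListOpts(opts.ValidateIPAddress)
	for _, v := range []string{"8.8.8.8", "not-an-ip"} {
		if err := dns.Set(v); err != nil {
			fmt.Println("rejected:", err)
		}
	}
	fmt.Println("accepted:", dns.GetAll()) // [8.8.8.8]
}
```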
diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go
new file mode 100644
index 00000000000..f1ce844a8f6
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_unix.go
@@ -0,0 +1,6 @@
+// +build !windows
+
+package opts
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "localhost"
diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go
new file mode 100644
index 00000000000..ebe40c969c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/opts_windows.go
@@ -0,0 +1,56 @@
+package opts
+
+// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
+// @jhowardmsft, @swernli.
+//
+// On Windows, this mitigates a problem with the default options of running
+// a docker client against a local docker daemon on TP5.
+//
+// What was found is that if the default host is "localhost", even if the
+// client (and daemon, as this is local) is not physically on a network, and
+// the DNS cache is flushed (ipconfig /flushdns), then the client will pause
+// for exactly one second when connecting to the daemon. For example, using
+// docker run windowsservercore cmd, the CLI will send a create followed by
+// an attach. You can see the delay between the create finishing and the
+// attach being seen by the daemon.
+//
+// Here's some daemon debug logs with additional debug spew put in. The
+// AfterWriteJSON log is the very last thing the daemon does as part of the
+// create call. The POST /attach is the second CLI call. Notice the second
+// time gap.
+//
+// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
+// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
+// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
+// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
+// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
+// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
+// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
+// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
+// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
+// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
+// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
+// ... 1 second gap here....
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
+// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
+//
+// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
+// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
+// the Windows networking stack is supposed to resolve "localhost" internally,
+// without hitting DNS, or even reading the hosts file (which is why localhost
+// is commented out in the hosts file on Windows).
+//
+// We have validated that working around this using the actual IPv4 localhost
+// address does not cause the delay.
+//
+// This does not occur with the docker client built with 1.4.3 on the same
+// Windows build, regardless of whether the daemon is built using 1.5.1
+// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
+// on a cross-compiled Windows binary (from Linux).
+//
+// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
+// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
+// explicitly.
+
+// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
+const DefaultHTTPHost = "127.0.0.1"
diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md
new file mode 100644
index 00000000000..c4b78a8ad84
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/README.md
@@ -0,0 +1,11 @@
+pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
+
+Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
+If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
+Docker organization, to facilitate re-use by other projects. However, that is not the priority.
+
+The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
+Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
+
+Because utility packages are small and neatly separated from the rest of the codebase, they are a good
+place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them!
diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go
new file mode 100644
index 00000000000..507298f42a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go
@@ -0,0 +1,92 @@
+// Package aaparser is a convenience package for interacting with `apparmor_parser`.
+package aaparser
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const (
+ binary = "apparmor_parser"
+)
+
+// GetVersion returns the major and minor version of apparmor_parser.
+func GetVersion() (int, error) {
+ output, err := cmd("", "--version")
+ if err != nil {
+ return -1, err
+ }
+
+ return parseVersion(output)
+}
+
+// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to
+// replace and write it to disk.
+func LoadProfile(profilePath string) error {
+ _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath))
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// cmd runs `apparmor_parser` with the passed arguments.
+func cmd(dir string, arg ...string) (string, error) {
+ c := exec.Command(binary, arg...)
+ c.Dir = dir
+
+ output, err := c.CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err)
+ }
+
+ return string(output), nil
+}
+
+// parseVersion takes the output from `apparmor_parser --version` and returns
+// the {major, minor, patch} version encoded as a single number of the form
+// MMmmPPP (major*10^5 + minor*10^3 + patch).
+func parseVersion(output string) (int, error) {
+ // output is in the form of the following:
+ // AppArmor parser version 2.9.1
+ // Copyright (C) 1999-2008 Novell Inc.
+ // Copyright 2009-2012 Canonical Ltd.
+
+ lines := strings.SplitN(output, "\n", 2)
+ words := strings.Split(lines[0], " ")
+ version := words[len(words)-1]
+
+ // split by major minor version
+ v := strings.Split(version, ".")
+ if len(v) == 0 || len(v) > 3 {
+ return -1, fmt.Errorf("parsing version failed for output: `%s`", output)
+ }
+
+ // Default the versions to 0.
+ var majorVersion, minorVersion, patchLevel int
+
+ majorVersion, err := strconv.Atoi(v[0])
+ if err != nil {
+ return -1, err
+ }
+
+ if len(v) > 1 {
+ minorVersion, err = strconv.Atoi(v[1])
+ if err != nil {
+ return -1, err
+ }
+ }
+ if len(v) > 2 {
+ patchLevel, err = strconv.Atoi(v[2])
+ if err != nil {
+ return -1, err
+ }
+ }
+
+ // major*10^5 + minor*10^3 + patch*10^0
+ numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel
+ return numericVersion, nil
+}
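A usage sketch for the package (it shells out, so `apparmor_parser` must be on PATH); note the version encoding, e.g. 2.9.95 becomes 2*1e5 + 9*1e3 + 95 = 209095:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/aaparser"
)

func main() {
	v, err := aaparser.GetVersion()
	if err != nil {
		fmt.Println("apparmor_parser unavailable:", err)
		return
	}
	fmt.Println("parser version:", v) // e.g. 209095 for 2.9.95
}
```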
diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go
new file mode 100644
index 00000000000..69bc8d2fd81
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/aaparser/aaparser_test.go
@@ -0,0 +1,73 @@
+package aaparser
+
+import (
+ "testing"
+)
+
+type versionExpected struct {
+ output string
+ version int
+}
+
+func TestParseVersion(t *testing.T) {
+ versions := []versionExpected{
+ {
+ output: `AppArmor parser version 2.10
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 210000,
+ },
+ {
+ output: `AppArmor parser version 2.8
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 208000,
+ },
+ {
+ output: `AppArmor parser version 2.20
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 220000,
+ },
+ {
+ output: `AppArmor parser version 2.05
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 205000,
+ },
+ {
+ output: `AppArmor parser version 2.9.95
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 209095,
+ },
+ {
+ output: `AppArmor parser version 3.14.159
+Copyright (C) 1999-2008 Novell Inc.
+Copyright 2009-2012 Canonical Ltd.
+
+`,
+ version: 314159,
+ },
+ }
+
+ for _, v := range versions {
+ version, err := parseVersion(v.output)
+ if err != nil {
+ t.Fatalf("expected error to be nil for %#v, got: %v", v, err)
+ }
+ if version != v.version {
+ t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v)
+ }
+ }
+}
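Since GetVersion returns this encoded form, a caller can gate behavior on a minimum parser version with one integer comparison. A hedged sketch of such a caller follows; the import path matches this vendored tree, the 2.9.0 cutoff is purely illustrative, and because GetVersion shells out to apparmor_parser, the snippet only works where that binary is installed:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/aaparser"
)

func main() {
	version, err := aaparser.GetVersion() // runs `apparmor_parser --version`
	if err != nil {
		log.Fatal(err)
	}
	// 209000 encodes 2.9.0 in the MMmmPPP scheme; the cutoff is an
	// illustrative assumption, not a real feature boundary.
	const required = 209000
	fmt.Println("parser is new enough:", version >= required)
}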
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md
rename to vendor/github.com/docker/docker/pkg/archive/README.md
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go
similarity index 63%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
rename to vendor/github.com/docker/docker/pkg/archive/archive.go
index 7306840b66b..ad3d65b2fc6 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive.go
@@ -17,37 +17,58 @@ import (
"strings"
"syscall"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/fileutils"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/promise"
+ "github.com/docker/docker/pkg/system"
)
type (
- Archive io.ReadCloser
- ArchiveReader io.Reader
- Compression int
+	// Archive is an io.ReadCloser, combining the Read and Close interfaces.
+	Archive io.ReadCloser
+	// Reader is an io.Reader.
+	Reader io.Reader
+	// Compression is the state representing whether, and how, the data is compressed.
+	Compression int
+	// WhiteoutFormat is the format of whiteout files, packed or unpacked.
+ WhiteoutFormat int
+ // TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
}
+ // TarOptions wraps the tar options.
TarOptions struct {
IncludeFiles []string
ExcludePatterns []string
Compression Compression
NoLchown bool
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
- Name string
IncludeSourceDir bool
+ // WhiteoutFormat is the expected on disk format for whiteout files.
+ // This format will be converted to the standard format on pack
+ // and from the standard format on unpack.
+ WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
+ // For each include when creating an archive, the included name will be
+ // replaced with the matching name from this map.
+ RebaseNames map[string]string
}
// Archiver allows the reuse of most utility functions of this package
- // with a pluggable Untar function.
+	// with a pluggable Untar function. Also, to facilitate the passing of
+	// specific ID mappings for untar, an archiver can be created with maps
+	// which will then be passed to Untar operations.
Archiver struct {
- Untar func(io.Reader, string, *TarOptions) error
+ Untar func(io.Reader, string, *TarOptions) error
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
}
// breakoutError is used to differentiate errors related to breaking out
@@ -57,17 +78,37 @@ type (
)
var (
+	// ErrNotImplemented is the error returned when a function is not implemented.
ErrNotImplemented = errors.New("Function not implemented")
- defaultArchiver = &Archiver{Untar}
+ defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
)
const (
+ // HeaderSize is the size in bytes of a tar header
+ HeaderSize = 512
+)
+
+const (
+	// Uncompressed represents the uncompressed state.
 	Uncompressed Compression = iota
+	// Bzip2 is the bzip2 compression algorithm.
 	Bzip2
+	// Gzip is the gzip compression algorithm.
 	Gzip
+	// Xz is the xz compression algorithm.
Xz
)
+const (
+ // AUFSWhiteoutFormat is the default format for whiteouts
+ AUFSWhiteoutFormat WhiteoutFormat = iota
+ // OverlayWhiteoutFormat formats whiteout according to the overlay
+ // standard.
+ OverlayWhiteoutFormat
+)
+
+// IsArchive checks for the magic bytes of a tar or any supported compression
+// algorithm.
func IsArchive(header []byte) bool {
compression := DetectCompression(header)
if compression != Uncompressed {
@@ -78,6 +119,24 @@ func IsArchive(header []byte) bool {
return err == nil
}
+// IsArchivePath checks if the (possibly compressed) file at the given path
+// starts with a tar file header.
+func IsArchivePath(path string) bool {
+ file, err := os.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Bzip2: {0x42, 0x5A, 0x68},
@@ -85,7 +144,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
- logrus.Debugf("Len too short")
+ logrus.Debug("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
@@ -95,17 +154,24 @@ func DetectCompression(source []byte) Compression {
return Uncompressed
}
-func xzDecompress(archive io.Reader) (io.ReadCloser, error) {
+func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
args := []string{"xz", "-d", "-c", "-q"}
- return CmdStream(exec.Command(args[0], args[1:]...), archive)
+ return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
+// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
bs, err := buf.Peek(10)
- if err != nil {
+ if err != nil && err != io.EOF {
+ // Note: we'll ignore any io.EOF error because there are some odd
+ // cases where the layer.tar file will be empty (zero bytes) and
+ // that results in an io.EOF from the Peek() call. So, in those
+ // cases we'll just treat it as a non-compressed stream and
+ // that means just create an empty layer.
+ // See Issue 18170
return nil, err
}
@@ -126,18 +192,22 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader)
return readBufWrapper, nil
case Xz:
- xzReader, err := xzDecompress(buf)
+ xzReader, chdone, err := xzDecompress(buf)
if err != nil {
return nil, err
}
readBufWrapper := p.NewReadCloserWrapper(buf, xzReader)
- return readBufWrapper, nil
+ return ioutils.NewReadCloserWrapper(readBufWrapper, func() error {
+ <-chdone
+ return readBufWrapper.Close()
+ }), nil
default:
return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension())
}
}
-func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
+// CompressStream compresses dest with the specified compression algorithm.
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
@@ -157,6 +227,7 @@ func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteClose
}
}
+// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
switch *compression {
case Uncompressed:
@@ -171,12 +242,25 @@ func (compression *Compression) Extension() string {
return ""
}
+type tarWhiteoutConverter interface {
+ ConvertWrite(*tar.Header, string, os.FileInfo) error
+ ConvertRead(*tar.Header, string) (bool, error)
+}
+
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
// for hardlink mapping
SeenFiles map[uint64]string
+ UIDMaps []idtools.IDMap
+ GIDMaps []idtools.IDMap
+
+	// For packing and unpacking whiteout files in a
+	// non-standard format. The whiteout files defined
+	// by the AUFS standard are used as the tar whiteout
+	// standard.
+ WhiteoutConverter tarWhiteoutConverter
}
// canonicalTarName provides a platform-independent and consistent posix-style
@@ -194,6 +278,7 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
+// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
@@ -219,14 +304,14 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
hdr.Name = name
- nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
+ inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys())
if err != nil {
return err
}
- // if it's a regular file and has more than 1 link,
+ // if it's not a directory and has more than 1 link,
// it's hardlinked, so set the type flag accordingly
- if fi.Mode().IsRegular() && nlink > 1 {
+ if !fi.IsDir() && hasHardlinks(fi) {
 		// a link should have a name that it links to
// and that linked name should be first in the tar archive
if oldpath, ok := ta.SeenFiles[inode]; ok {
@@ -244,11 +329,37 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Xattrs["security.capability"] = string(capability)
}
+	// handle re-mapping container ID mappings back to host ID mappings before
+	// writing tar headers/files. We skip whiteout files because they were written
+	// by the kernel and already have proper ownership relative to the host.
+ if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
+ uid, gid, err := getFileUIDGID(fi.Sys())
+ if err != nil {
+ return err
+ }
+ xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
+ if err != nil {
+ return err
+ }
+ xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ hdr.Gid = xGID
+ }
+
+ if ta.WhiteoutConverter != nil {
+ if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
+ return err
+ }
+ }
+
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
- if hdr.Typeflag == tar.TypeReg {
+ if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := os.Open(path)
if err != nil {
return err
@@ -329,7 +440,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
case tar.TypeXGlobalHeader:
- logrus.Debugf("PAX Global Extended Headers found and ignored")
+ logrus.Debug("PAX Global Extended Headers found and ignored")
return nil
default:
@@ -346,10 +457,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
+ var errors []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
+ if err == syscall.ENOTSUP {
+ // We ignore errors here because not all graphdrivers support
+ // xattrs *cough* old versions of AUFS *cough*. However only
+ // ENOTSUP should be emitted in that case, otherwise we still
+ // bail.
+ errors = append(errors, err.Error())
+ continue
+ }
return err
}
+
+ }
+
+ if len(errors) > 0 {
+ logrus.WithFields(logrus.Fields{
+ "errors": errors,
+ }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
}
// There is no LChmod, so ignore mode for symlink. Also, this
@@ -358,19 +485,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
return err
}
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
- // syscall.UtimesNano doesn't support a NOFOLLOW flag atm
+ aTime := hdr.AccessTime
+ if aTime.Before(hdr.ModTime) {
+ // Last access time should never be before last modified time.
+ aTime = hdr.ModTime
+ }
+
+ // system.Chtimes doesn't support a NOFOLLOW flag atm
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
- if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
- if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
+ if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
return err
}
} else {
+ ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
return err
}
@@ -388,6 +521,10 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
+ // Fix the source path to work with long path names. This is a no-op
+ // on platforms other than Windows.
+ srcPath = fixVolumePathPrefix(srcPath)
+
patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)
if err != nil {
@@ -403,21 +540,24 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
go func() {
ta := &tarAppender{
- TarWriter: tar.NewWriter(compressWriter),
- Buffer: pools.BufioWriter32KPool.Get(nil),
- SeenFiles: make(map[uint64]string),
+ TarWriter: tar.NewWriter(compressWriter),
+ Buffer: pools.BufioWriter32KPool.Get(nil),
+ SeenFiles: make(map[uint64]string),
+ UIDMaps: options.UIDMaps,
+ GIDMaps: options.GIDMaps,
+ WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
}
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
- logrus.Debugf("Can't close tar writer: %s", err)
+ logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
- logrus.Debugf("Can't close compress writer: %s", err)
+ logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
- logrus.Debugf("Can't close pipe writer: %s", err)
+ logrus.Errorf("Can't close pipe writer: %s", err)
}
}()
@@ -454,14 +594,13 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
seen := make(map[string]bool)
- var renamedRelFilePath string // For when tar.Options.Name is set
for _, include := range options.IncludeFiles {
- // We can't use filepath.Join(srcPath, include) because this will
- // clean away a trailing "." or "/" which may be important.
- walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
+ rebaseName := options.RebaseNames[include]
+
+ walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
- logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
+ logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
@@ -486,16 +625,42 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
if err != nil {
- logrus.Debugf("Error matching %s: %v", relFilePath, err)
+ logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
}
if skip {
- if !exceptions && f.IsDir() {
+			// If we want to skip this file and it's a directory
+			// then we should first check to see if there's an
+			// excludes pattern (e.g. !dir/file) that starts with this
+			// dir. If so then we can't skip this dir.
+
+			// It's not a dir, so we can just return/skip.
+ if !f.IsDir() {
+ return nil
+ }
+
+ // No exceptions (!...) in patterns so just skip dir
+ if !exceptions {
return filepath.SkipDir
}
- return nil
+
+ dirSlash := relFilePath + string(filepath.Separator)
+
+ for _, pat := range patterns {
+ if pat[0] != '!' {
+ continue
+ }
+ pat = pat[1:] + string(filepath.Separator)
+ if strings.HasPrefix(pat, dirSlash) {
+ // found a match - so can't skip this dir
+ return nil
+ }
+ }
+
+ // No matching exclusion dir so just skip dir
+ return filepath.SkipDir
}
if seen[relFilePath] {
@@ -503,18 +668,25 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
seen[relFilePath] = true
- // TODO Windows: Verify if this needs to be os.Pathseparator
- // Rename the base resource
- if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
- renamedRelFilePath = relFilePath
- }
- // Set this to make sure the items underneath also get renamed
- if options.Name != "" {
- relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
+ // Rename the base resource.
+ if rebaseName != "" {
+ var replacement string
+ if rebaseName != string(filepath.Separator) {
+ // Special case the root directory to replace with an
+ // empty string instead so that we don't end up with
+ // double slashes in the paths.
+ replacement = rebaseName
+ }
+
+ relFilePath = strings.Replace(relFilePath, include, replacement, 1)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
- logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
+ logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
+ // if pipe is broken, stop writing tar stream to it
+ if err == io.ErrClosedPipe {
+ return err
+ }
}
return nil
})
@@ -524,12 +696,18 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return pipeReader, nil
}
+// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
tr := tar.NewReader(decompressedArchive)
trBuf := pools.BufioReader32KPool.Get(nil)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
// Iterate through the files in the archive.
loop:
@@ -562,7 +740,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
- err = system.MkdirAll(parentPath, 0777)
+ err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
if err != nil {
return err
}
@@ -607,6 +785,38 @@ loop:
}
trBuf.Reset(tr)
+	// if the options contain uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if hdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if hdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
+ if whiteoutConverter != nil {
+ writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
+ if err != nil {
+ return err
+ }
+ if !writeFile {
+ continue
+ }
+ }
+
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
@@ -620,8 +830,8 @@ loop:
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
- if err := syscall.UtimesNano(path, ts); err != nil {
+
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
}
@@ -637,7 +847,7 @@ func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
}
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error {
@@ -657,7 +867,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
options.ExcludePatterns = []string{}
}
- var r io.Reader = tarArchive
+ r := tarArchive
if decompress {
decompressedArchive, err := DecompressStream(tarArchive)
if err != nil {
@@ -670,6 +880,8 @@ func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decomp
return Unpack(r, dest, options)
}
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
func (archiver *Archiver) TarUntar(src, dst string) error {
logrus.Debugf("TarUntar(%s %s)", src, dst)
archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed})
@@ -677,7 +889,15 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
return err
}
defer archive.Close()
- return archiver.Untar(archive, dst, nil)
+
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
+ }
+ return archiver.Untar(archive, dst, options)
}
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
@@ -686,16 +906,21 @@ func TarUntar(src, dst string) error {
return defaultArchiver.TarUntar(src, dst)
}
+// UntarPath untars the tar archive at src into the destination directory dst.
func (archiver *Archiver) UntarPath(src, dst string) error {
archive, err := os.Open(src)
if err != nil {
return err
}
defer archive.Close()
- if err := archiver.Untar(archive, dst, nil); err != nil {
- return err
+ var options *TarOptions
+ if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
+ options = &TarOptions{
+ UIDMaps: archiver.UIDMaps,
+ GIDMaps: archiver.GIDMaps,
+ }
}
- return nil
+ return archiver.Untar(archive, dst, options)
}
// UntarPath is a convenience function which looks for an archive
@@ -704,6 +929,10 @@ func UntarPath(src, dst string) error {
return defaultArchiver.UntarPath(src, dst)
}
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
func (archiver *Archiver) CopyWithTar(src, dst string) error {
srcSt, err := os.Stat(src)
if err != nil {
@@ -712,9 +941,17 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
if !srcSt.IsDir() {
return archiver.CopyFileWithTar(src, dst)
}
+
+ // if this archiver is set up with ID mapping we need to create
+ // the new destination directory with the remapped root UID/GID pair
+ // as owner
+ rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
- if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) {
+ if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
@@ -729,6 +966,9 @@ func CopyWithTar(src, dst string) error {
return defaultArchiver.CopyWithTar(src, dst)
}
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
srcSt, err := os.Stat(src)
@@ -746,7 +986,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
dst = filepath.Join(dst, filepath.Base(src))
}
// Create the holding directory if necessary
- if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) {
+ if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil {
return err
}
@@ -767,6 +1007,28 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+
+ // only perform mapping if the file being copied isn't already owned by the
+ // uid or gid of the remapped root in the container
+ if remappedRootUID != hdr.Uid {
+ xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Uid = xUID
+ }
+ if remappedRootGID != hdr.Gid {
+ xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
+ if err != nil {
+ return err
+ }
+ hdr.Gid = xGID
+ }
+
tw := tar.NewWriter(w)
defer tw.Close()
if err := tw.WriteHeader(hdr); err != nil {
@@ -782,7 +1044,12 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
err = er
}
}()
- return archiver.Untar(r, filepath.Dir(dst), nil)
+
+ err = archiver.Untar(r, filepath.Dir(dst), nil)
+ if err != nil {
+ r.CloseWithError(err)
+ }
+ return err
}
// CopyFileWithTar emulates the behavior of the 'cp' command-line
@@ -797,57 +1064,33 @@ func CopyFileWithTar(src, dst string) (err error) {
return defaultArchiver.CopyFileWithTar(src, dst)
}
-// CmdStream executes a command, and returns its stdout as a stream.
+// cmdStream executes a command, and returns its stdout as a stream.
// If the command fails to run or doesn't complete successfully, an error
// will be returned, including anything written on stderr.
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
- if input != nil {
- stdin, err := cmd.StdinPipe()
- if err != nil {
- return nil, err
- }
- // Write stdin if any
- go func() {
- io.Copy(stdin, input)
- stdin.Close()
- }()
- }
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
- stderr, err := cmd.StderrPipe()
- if err != nil {
- return nil, err
- }
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) {
+ chdone := make(chan struct{})
+ cmd.Stdin = input
pipeR, pipeW := io.Pipe()
- errChan := make(chan []byte)
- // Collect stderr, we will use it in case of an error
- go func() {
- errText, e := ioutil.ReadAll(stderr)
- if e != nil {
- errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")")
- }
- errChan <- errText
- }()
+ cmd.Stdout = pipeW
+ var errBuf bytes.Buffer
+ cmd.Stderr = &errBuf
+
+ // Run the command and return the pipe
+ if err := cmd.Start(); err != nil {
+ return nil, nil, err
+ }
+
// Copy stdout to the returned pipe
go func() {
- _, err := io.Copy(pipeW, stdout)
- if err != nil {
- pipeW.CloseWithError(err)
- }
- errText := <-errChan
if err := cmd.Wait(); err != nil {
- pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText))
+ pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
} else {
pipeW.Close()
}
+ close(chdone)
}()
- // Run the command and return the pipe
- if err := cmd.Start(); err != nil {
- return nil, err
- }
- return pipeR, nil
+
+ return pipeR, chdone, nil
}
// NewTempArchive reads the content of src into a temporary file, and returns the contents
@@ -872,6 +1115,8 @@ func NewTempArchive(src Archive, dir string) (*TempArchive, error) {
return &TempArchive{File: f, Size: size}, nil
}
+// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes,
+// the file will be deleted.
type TempArchive struct {
*os.File
Size int64 // Pre-computed from Stat().Size() as a convenience
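Taken together, the exported pieces above form a pack/unpack pipeline: TarWithOptions applies the dockerignore-style exclude patterns, including the `!` exception handling in the walk logic, and Untar sniffs and strips any compression via DecompressStream before unpacking. A minimal sketch of the round trip, in which the /tmp paths and the patterns are illustrative assumptions:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Exclude everything under "cache" but keep cache/keep.txt; the
	// walker cannot skip the "cache" directory wholesale because the
	// "!cache/keep.txt" exception starts inside it.
	rdr, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"cache", "!cache/keep.txt"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rdr.Close()

	// Untar decompresses automatically (DecompressStream detects the
	// gzip magic bytes) and unpacks into the destination directory.
	if err := archive.Untar(rdr, "/tmp/dst", nil); err != nil {
		log.Fatal(err)
	}
}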
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
new file mode 100644
index 00000000000..277ff98885c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go
@@ -0,0 +1,90 @@
+package archive
+
+import (
+ "archive/tar"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ if format == OverlayWhiteoutFormat {
+ return overlayWhiteoutConverter{}
+ }
+ return nil
+}
+
+type overlayWhiteoutConverter struct{}
+
+func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
+ // convert whiteouts to AUFS format
+ if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
+ // we just rename the file and make it normal
+ hdr.Name = WhiteoutPrefix + hdr.Name
+ hdr.Mode = 0600
+ hdr.Typeflag = tar.TypeReg
+ hdr.Size = 0
+ }
+
+ if fi.Mode()&os.ModeDir != 0 {
+ // convert opaque dirs to AUFS format by writing an empty file with the prefix
+ opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
+ if err != nil {
+ return err
+ }
+ if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
+ // create a header for the whiteout file
+ // it should inherit some properties from the parent, but be a regular file
+ *hdr = tar.Header{
+ Typeflag: tar.TypeReg,
+ Mode: hdr.Mode & int64(os.ModePerm),
+ Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
+ Size: 0,
+ Uid: hdr.Uid,
+ Uname: hdr.Uname,
+ Gid: hdr.Gid,
+ Gname: hdr.Gname,
+ AccessTime: hdr.AccessTime,
+ ChangeTime: hdr.ChangeTime,
+ }
+ }
+ }
+
+ return nil
+}
+
+func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+ base := filepath.Base(path)
+ dir := filepath.Dir(path)
+
+ // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
+ if base == WhiteoutOpaqueDir {
+ if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ // if a file was deleted and we are using overlay, we need to create a character device
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+
+ if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
+ return false, err
+ }
+ if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
+ return false, err
+ }
+
+ // don't write the file itself
+ return false, nil
+ }
+
+ return true, nil
+}
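The converter above boils down to a renaming scheme plus a representation change: overlay records a deleted file as a 0/0 character device and an opaque directory as a trusted.overlay.opaque=y xattr, while the tar stream uses the AUFS conventions of a zero-length .wh.-prefixed file and a .wh..wh..opq entry inside the directory. A standalone sketch of just the name mapping; whiteoutPrefix mirrors the package's WhiteoutPrefix constant (defined elsewhere in the package) and the paths are illustrative:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh." // mirrors archive.WhiteoutPrefix, defined elsewhere in the package

// toAUFSWhiteout is the ConvertWrite direction: a file deleted at
// dir/name is represented in the tar stream as an empty regular file
// named dir/.wh.name.
func toAUFSWhiteout(path string) string {
	return filepath.Join(filepath.Dir(path), whiteoutPrefix+filepath.Base(path))
}

// fromAUFSWhiteout is the ConvertRead direction: dir/.wh.name maps back
// to dir/name, where the real converter then calls Mknod to create the
// 0/0 character device on an overlay filesystem.
func fromAUFSWhiteout(path string) (string, bool) {
	base := filepath.Base(path)
	if !strings.HasPrefix(base, whiteoutPrefix) {
		return path, false
	}
	return filepath.Join(filepath.Dir(path), base[len(whiteoutPrefix):]), true
}

func main() {
	fmt.Println(toAUFSWhiteout("etc/passwd")) // etc/.wh.passwd
	orig, ok := fromAUFSWhiteout("etc/.wh.passwd")
	fmt.Println(orig, ok) // etc/passwd true
}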
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
new file mode 100644
index 00000000000..54acbf28566
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package archive
+
+func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_test.go
new file mode 100644
index 00000000000..85e41227c06
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_test.go
@@ -0,0 +1,1148 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+var tmp string
+
+func init() {
+ tmp = "/tmp/"
+ if runtime.GOOS == "windows" {
+ tmp = os.Getenv("TEMP") + `\`
+ }
+}
+
+func TestIsArchiveNilHeader(t *testing.T) {
+ out := IsArchive(nil)
+ if out {
+ t.Fatalf("isArchive should return false as nil is not a valid archive header")
+ }
+}
+
+func TestIsArchiveInvalidHeader(t *testing.T) {
+ header := []byte{0x00, 0x01, 0x02}
+ out := IsArchive(header)
+ if out {
+ t.Fatalf("isArchive should return false as %s is not a valid archive header", header)
+ }
+}
+
+func TestIsArchiveBzip2(t *testing.T) {
+ header := []byte{0x42, 0x5A, 0x68}
+ out := IsArchive(header)
+ if !out {
+ t.Fatalf("isArchive should return true as %s is a bz2 header", header)
+ }
+}
+
+func TestIsArchive7zip(t *testing.T) {
+ header := []byte{0x50, 0x4b, 0x03, 0x04}
+ out := IsArchive(header)
+ if out {
+ t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header)
+ }
+}
+
+func TestIsArchivePathDir(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ if IsArchivePath(tmp + "archivedir") {
+ t.Fatalf("Incorrectly recognised directory as an archive")
+ }
+}
+
+func TestIsArchivePathInvalidFile(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1K count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ if IsArchivePath(tmp + "archive") {
+ t.Fatalf("Incorrectly recognised invalid tar path as archive")
+ }
+ if IsArchivePath(tmp + "archive.gz") {
+ t.Fatalf("Incorrectly recognised invalid compressed tar path as archive")
+ }
+}
+
+func TestIsArchivePathTar(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "touch /tmp/archivedata && tar -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ if !IsArchivePath(tmp + "/archive") {
+ t.Fatalf("Did not recognise valid tar path as archive")
+ }
+ if !IsArchivePath(tmp + "archive.gz") {
+ t.Fatalf("Did not recognise valid compressed tar path as archive")
+ }
+}
+
+func TestDecompressStreamGzip(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open(tmp + "archive.gz")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress a gzip file.")
+ }
+}
+
+func TestDecompressStreamBzip2(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open(tmp + "archive.bz2")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress a bzip2 file.")
+ }
+}
+
+func TestDecompressStreamXz(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("Xz not present in msys2")
+ }
+ cmd := exec.Command("sh", "-c", "touch /tmp/archive && xz -f /tmp/archive")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("Fail to create an archive file for test : %s.", output)
+ }
+ archive, err := os.Open(tmp + "archive.xz")
+ _, err = DecompressStream(archive)
+ if err != nil {
+ t.Fatalf("Failed to decompress an xz file.")
+ }
+}
+
+func TestCompressStreamXzUnsuported(t *testing.T) {
+ dest, err := os.Create(tmp + "dest")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ _, err = CompressStream(dest, Xz)
+ if err == nil {
+ t.Fatalf("Should fail as xz is unsupported for compression format.")
+ }
+}
+
+func TestCompressStreamBzip2Unsupported(t *testing.T) {
+ dest, err := os.Create(tmp + "dest")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+	_, err = CompressStream(dest, Bzip2)
+	if err == nil {
+		t.Fatalf("Should fail as bzip2 is an unsupported compression format.")
+ }
+}
+
+func TestCompressStreamInvalid(t *testing.T) {
+ dest, err := os.Create(tmp + "dest")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ _, err = CompressStream(dest, -1)
+ if err == nil {
+ t.Fatalf("Should fail as xz is unsupported for compression format.")
+ }
+}
+
+func TestExtensionInvalid(t *testing.T) {
+ compression := Compression(-1)
+ output := compression.Extension()
+ if output != "" {
+ t.Fatalf("The extension of an invalid compression should be an empty string.")
+ }
+}
+
+func TestExtensionUncompressed(t *testing.T) {
+ compression := Uncompressed
+ output := compression.Extension()
+ if output != "tar" {
+ t.Fatalf("The extension of an uncompressed archive should be 'tar'.")
+ }
+}
+func TestExtensionBzip2(t *testing.T) {
+ compression := Bzip2
+ output := compression.Extension()
+ if output != "tar.bz2" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
+ }
+}
+func TestExtensionGzip(t *testing.T) {
+ compression := Gzip
+ output := compression.Extension()
+ if output != "tar.gz" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'")
+ }
+}
+func TestExtensionXz(t *testing.T) {
+ compression := Xz
+ output := compression.Extension()
+ if output != "tar.xz" {
+ t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'")
+ }
+}
+
+func TestCmdStreamLargeStderr(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
+ out, _, err := cmdStream(cmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ errCh := make(chan error)
+ go func() {
+ _, err := io.Copy(ioutil.Discard, out)
+ errCh <- err
+ }()
+ select {
+ case err := <-errCh:
+ if err != nil {
+ t.Fatalf("Command should not have failed (err=%.100s...)", err)
+ }
+ case <-time.After(5 * time.Second):
+ t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
+ }
+}
+
+func TestCmdStreamBad(t *testing.T) {
+ // TODO Windows: Figure out why this is failing in CI but not locally
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows CI machines")
+ }
+ badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
+ out, _, err := cmdStream(badCmd, nil)
+ if err != nil {
+ t.Fatalf("Failed to start command: %s", err)
+ }
+ if output, err := ioutil.ReadAll(out); err == nil {
+ t.Fatalf("Command should have failed")
+ } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
+ t.Fatalf("Wrong error value (%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestCmdStreamGood(t *testing.T) {
+ cmd := exec.Command("sh", "-c", "echo hello; exit 0")
+ out, _, err := cmdStream(cmd, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if output, err := ioutil.ReadAll(out); err != nil {
+ t.Fatalf("Command should not have failed (err=%s)", err)
+ } else if s := string(output); s != "hello\n" {
+ t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
+ }
+}
+
+func TestUntarPathWithInvalidDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ invalidDestFolder := filepath.Join(tempFolder, "invalidDest")
+ // Create a src file
+ srcFile := filepath.Join(tempFolder, "src")
+ tarFile := filepath.Join(tempFolder, "src.tar")
+ os.Create(srcFile)
+ os.Create(invalidDestFolder) // being a file (not dir) should cause an error
+
+ // Translate back to Unix semantics as next exec.Command is run under sh
+ srcFileU := srcFile
+ tarFileU := tarFile
+ if runtime.GOOS == "windows" {
+ tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+ srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+ }
+
+ cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = UntarPath(tarFile, invalidDestFolder)
+ if err == nil {
+ t.Fatalf("UntarPath with invalid destination path should throw an error.")
+ }
+}
+
+func TestUntarPathWithInvalidSrc(t *testing.T) {
+ dest, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ defer os.RemoveAll(dest)
+ err = UntarPath("/invalid/path", dest)
+ if err == nil {
+ t.Fatalf("UntarPath with invalid src path should throw an error.")
+ }
+}
+
+func TestUntarPath(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := filepath.Join(tmpFolder, "src")
+ tarFile := filepath.Join(tmpFolder, "src.tar")
+ os.Create(filepath.Join(tmpFolder, "src"))
+
+ destFolder := filepath.Join(tmpFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+
+ // Translate back to Unix semantics as next exec.Command is run under sh
+ srcFileU := srcFile
+ tarFileU := tarFile
+ if runtime.GOOS == "windows" {
+ tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+ srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+ }
+ cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = UntarPath(tarFile, destFolder)
+ if err != nil {
+ t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
+ }
+ expectedFile := filepath.Join(destFolder, srcFileU)
+ _, err = os.Stat(expectedFile)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+// Do the same test as above but with the destination as a file; it should fail
+func TestUntarPathWithDestinationFile(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := filepath.Join(tmpFolder, "src")
+ tarFile := filepath.Join(tmpFolder, "src.tar")
+ os.Create(filepath.Join(tmpFolder, "src"))
+
+ // Translate back to Unix semantics as next exec.Command is run under sh
+ srcFileU := srcFile
+ tarFileU := tarFile
+ if runtime.GOOS == "windows" {
+ tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+ srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+ }
+ cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ destFile := filepath.Join(tmpFolder, "dest")
+ _, err = os.Create(destFile)
+ if err != nil {
+ t.Fatalf("Fail to create the destination file")
+ }
+ err = UntarPath(tarFile, destFile)
+ if err == nil {
+ t.Fatalf("UntarPath should throw an error if the destination if a file")
+ }
+}
+
+// Do the same test as above but with a destination folder that already exists
+// and where the destination file is a directory.
+// It works; see https://github.com/docker/docker/issues/10040
+func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+ srcFile := filepath.Join(tmpFolder, "src")
+ tarFile := filepath.Join(tmpFolder, "src.tar")
+ os.Create(srcFile)
+
+ // Translate back to Unix semantics as next exec.Command is run under sh
+ srcFileU := srcFile
+ tarFileU := tarFile
+ if runtime.GOOS == "windows" {
+ tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
+ srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
+ }
+
+ cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ destFolder := filepath.Join(tmpFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatalf("Fail to create the destination folder")
+ }
+	// Let's create a folder that has the same path as the extracted file (from tar)
+ destSrcFileAsFolder := filepath.Join(destFolder, srcFileU)
+ err = os.MkdirAll(destSrcFileAsFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = UntarPath(tarFile, destFolder)
+ if err != nil {
+ t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder")
+ }
+}
+
+func TestCopyWithTarInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+		t.Fatal(err)
+ }
+ destFolder := filepath.Join(tempFolder, "dest")
+ invalidSrc := filepath.Join(tempFolder, "doesnotexists")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyWithTar(invalidSrc, destFolder)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+ }
+}
+
+func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+		t.Fatal(err)
+ }
+ srcFolder := filepath.Join(tempFolder, "src")
+ inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyWithTar(srcFolder, inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+ }
+ _, err = os.Stat(inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+ }
+}
+
+// Test CopyWithTar with a file as src
+func TestCopyWithTarSrcFile(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := filepath.Join(folder, "dest")
+ srcFolder := filepath.Join(folder, "src")
+ src := filepath.Join(folder, filepath.Join("src", "src"))
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err != nil {
+ t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ // FIXME Check the content
+ if err != nil {
+ t.Fatalf("Destination file should be the same as the source.")
+ }
+}
+
+// Test CopyWithTar with a folder as src
+func TestCopyWithTarSrcFolder(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := filepath.Join(folder, "dest")
+ src := filepath.Join(folder, filepath.Join("src", "folder"))
+ err = os.MkdirAll(src, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err != nil {
+ t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ // FIXME Check the content (the file inside)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+func TestCopyFileWithTarInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ destFolder := filepath.Join(tempFolder, "dest")
+ err = os.MkdirAll(destFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ invalidFile := filepath.Join(tempFolder, "doesnotexists")
+ err = CopyFileWithTar(invalidFile, destFolder)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
+ }
+}
+
+func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+		t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+ srcFile := filepath.Join(tempFolder, "src")
+ inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
+ _, err = os.Create(srcFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyFileWithTar(srcFile, inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
+ }
+ _, err = os.Stat(inexistentDestFolder)
+ if err != nil {
+ t.Fatalf("CopyWithTar with an inexistent folder should create it.")
+ }
+ // FIXME Test the src file and content
+}
+
+func TestCopyFileWithTarSrcFolder(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := filepath.Join(folder, "dest")
+ src := filepath.Join(folder, "srcfolder")
+ err = os.MkdirAll(src, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = CopyFileWithTar(src, dest)
+ if err == nil {
+ t.Fatalf("CopyFileWithTar should throw an error with a folder.")
+ }
+}
+
+func TestCopyFileWithTarSrcFile(t *testing.T) {
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := filepath.Join(folder, "dest")
+ srcFolder := filepath.Join(folder, "src")
+ src := filepath.Join(folder, filepath.Join("src", "src"))
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = os.MkdirAll(dest, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest+"/")
+ if err != nil {
+ t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
+ }
+ _, err = os.Stat(dest)
+ if err != nil {
+ t.Fatalf("Destination folder should contain the source file but did not.")
+ }
+}
+
+func TestTarFiles(t *testing.T) {
+ // TODO Windows: Figure out how to port this test.
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ // try without hardlinks
+ if err := checkNoChanges(1000, false); err != nil {
+ t.Fatal(err)
+ }
+ // try with hardlinks
+ if err := checkNoChanges(1000, true); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func checkNoChanges(fileNum int, hardlinks bool) error {
+ srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(srcDir)
+
+ destDir, err := ioutil.TempDir("", "docker-test-destDir")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(destDir)
+
+ _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks)
+ if err != nil {
+ return err
+ }
+
+ err = TarUntar(srcDir, destDir)
+ if err != nil {
+ return err
+ }
+
+ changes, err := ChangesDirs(destDir, srcDir)
+ if err != nil {
+ return err
+ }
+ if len(changes) > 0 {
+ return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes))
+ }
+ return nil
+}
+
+func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) {
+ archive, err := TarWithOptions(origin, options)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer archive.Close()
+
+ buf := make([]byte, 10)
+ if _, err := archive.Read(buf); err != nil {
+ return nil, err
+ }
+ wrap := io.MultiReader(bytes.NewReader(buf), archive)
+
+ detectedCompression := DetectCompression(buf)
+ compression := options.Compression
+ if detectedCompression.Extension() != compression.Extension() {
+ return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension())
+ }
+
+ tmp, err := ioutil.TempDir("", "docker-test-untar")
+ if err != nil {
+ return nil, err
+ }
+ defer os.RemoveAll(tmp)
+ if err := Untar(wrap, tmp, nil); err != nil {
+ return nil, err
+ }
+ if _, err := os.Stat(tmp); err != nil {
+ return nil, err
+ }
+
+ return ChangesDirs(origin, tmp)
+}
+
+func TestTarUntar(t *testing.T) {
+ // TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, c := range []Compression{
+ Uncompressed,
+ Gzip,
+ } {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ ExcludePatterns: []string{"3"},
+ })
+
+ if err != nil {
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+ }
+
+ if len(changes) != 1 || changes[0].Path != "/3" {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ }
+}
+
+func TestTarWithOptions(t *testing.T) {
+ // TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ioutil.TempDir(origin, "folder"); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ opts *TarOptions
+ numChanges int
+ }{
+ {&TarOptions{IncludeFiles: []string{"1"}}, 2},
+ {&TarOptions{ExcludePatterns: []string{"2"}}, 1},
+ {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2},
+ {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2},
+ {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4},
+ }
+ for _, testCase := range cases {
+ changes, err := tarUntar(t, origin, testCase.opts)
+ if err != nil {
+ t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err)
+ }
+ if len(changes) != testCase.numChanges {
+ t.Errorf("Expected %d changes, got %d for %+v:",
+ testCase.numChanges, len(changes), testCase.opts)
+ }
+ }
+}
+
+// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz
+// use PAX Global Extended Headers.
+// Failing prevents the archives from being uncompressed during ADD
+func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) {
+ hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader}
+ tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+ err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things.
+// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work.
+func TestUntarUstarGnuConflict(t *testing.T) {
+ f, err := os.Open("testdata/broken.tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ found := false
+ tr := tar.NewReader(f)
+ // Iterate through the files in the archive.
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm")
+ }
+}
+
+func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeLinks {
+ if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
+
+func BenchmarkTarUntar(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := filepath.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, false)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func BenchmarkTarUntarWithLinks(b *testing.B) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ b.Fatal(err)
+ }
+ tempDir, err := ioutil.TempDir("", "docker-test-untar-destination")
+ if err != nil {
+ b.Fatal(err)
+ }
+ target := filepath.Join(tempDir, "dest")
+ n, err := prepareUntarSourceDirectory(100, origin, true)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ defer os.RemoveAll(tempDir)
+
+ b.ResetTimer()
+ b.SetBytes(int64(n))
+ for n := 0; n < b.N; n++ {
+ err := TarUntar(origin, target)
+ if err != nil {
+ b.Fatal(err)
+ }
+ os.RemoveAll(target)
+ }
+}
+
+func TestUntarInvalidFilenames(t *testing.T) {
+ // TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" {
+ t.Skip("Passes but hits breakoutError: platform and architecture is not supported")
+ }
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarHardlinkToSymlink(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ if runtime.GOOS == "windows" {
+ t.Skip("hardlinks on Windows")
+ }
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "symlink1",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "regfile",
+ Mode: 0644,
+ },
+ {
+ Name: "symlink2",
+ Typeflag: tar.TypeLink,
+ Linkname: "symlink1",
+ Mode: 0644,
+ },
+ {
+ Name: "regfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidHardlink(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ if runtime.GOOS == "windows" {
+ t.Skip("hardlinks on Windows")
+ }
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestUntarInvalidSymlink(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
+ }
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try writing to victim/newdir/newfile with a symlink in the path
+ {
+ // this header needs to be before the next one, or else there is an error
+ Name: "dir/loophole",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "dir/loophole/newdir/newfile",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestTempArchiveCloseMultipleTimes(t *testing.T) {
+ reader := ioutil.NopCloser(strings.NewReader("hello"))
+	tempArchive, err := NewTempArchive(reader, "")
+	if err != nil {
+		t.Fatal(err)
+	}
+ buf := make([]byte, 10)
+ n, err := tempArchive.Read(buf)
+ if n != 5 {
+ t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
+ }
+ for i := 0; i < 3; i++ {
+ if err = tempArchive.Close(); err != nil {
+ t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err)
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
similarity index 67%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
rename to vendor/github.com/docker/docker/pkg/archive/archive_unix.go
index 5c754373fab..fbc3bb8c4d1 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go
@@ -6,11 +6,26 @@ import (
"archive/tar"
"errors"
"os"
+ "path/filepath"
"syscall"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/pkg/system"
)
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return srcPath
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific. On Linux, we
+// can't use filepath.Join(srcPath,include) because this will clean away
+// a trailing "." or "/" which may be important.
+func getWalkRoot(srcPath string, include string) string {
+ return srcPath + string(filepath.Separator) + include
+}
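To make the comment above concrete, here is a small standalone demonstration (standard library only) of why `filepath.Join` cannot be used on Linux: it cleans its result, dropping the trailing `.` or separator that the walk needs to see.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Join cleans its result, so the trailing "." is lost.
	fmt.Println(filepath.Join("srcdir", "."))                // "srcdir"
	// Plain concatenation, as getWalkRoot does, preserves it.
	fmt.Println("srcdir" + string(filepath.Separator) + ".") // "srcdir/."
}
```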
+
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
@@ -25,7 +40,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
@@ -33,10 +48,9 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
- nlink = uint32(s.Nlink)
inode = uint64(s.Ino)
- // Currently go does not fil in the major/minors
+ // Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
@@ -46,6 +60,15 @@ func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, st
return
}
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ s, ok := stat.(*syscall.Stat_t)
+
+ if !ok {
+ return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
+ }
+ return int(s.Uid), int(s.Gid), nil
+}
+
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go
new file mode 100644
index 00000000000..548391b35dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix_test.go
@@ -0,0 +1,245 @@
+// +build !windows
+
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+ cases := []struct{ in, expected string }{
+ {"foo", "foo"},
+ {"foo/bar", "foo/bar"},
+ {"foo/dir/", "foo/dir/"},
+ }
+ for _, v := range cases {
+ if out, err := CanonicalTarNameForPath(v.in); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestCanonicalTarName(t *testing.T) {
+ cases := []struct {
+ in string
+ isDir bool
+ expected string
+ }{
+ {"foo", false, "foo"},
+ {"foo", true, "foo/"},
+ {"foo/bar", false, "foo/bar"},
+ {"foo/bar", true, "foo/bar/"},
+ }
+ for _, v := range cases {
+ if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestChmodTarEntry(t *testing.T) {
+ cases := []struct {
+ in, expected os.FileMode
+ }{
+ {0000, 0000},
+ {0777, 0777},
+ {0644, 0644},
+ {0755, 0755},
+ {0444, 0444},
+ }
+ for _, v := range cases {
+ if out := chmodTarEntry(v.in); out != v.expected {
+ t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
+ }
+ }
+}
+
+func TestTarWithHardLink(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ var i1, i2 uint64
+ if i1, err = getNlink(filepath.Join(origin, "1")); err != nil {
+ t.Fatal(err)
+ }
+ // sanity check that we can hardlink
+ if i1 != 2 {
+ t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
+ }
+
+ dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+
+ // we'll do this in two steps to separate failure
+ fh, err := Tar(origin, Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure we can read the whole thing with no error, before writing back out
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bRdr := bytes.NewReader(buf)
+ err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if i1, err = getInode(filepath.Join(dest, "1")); err != nil {
+ t.Fatal(err)
+ }
+ if i2, err = getInode(filepath.Join(dest, "2")); err != nil {
+ t.Fatal(err)
+ }
+
+ if i1 != i2 {
+ t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
+ }
+}
+
+func getNlink(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+ }
+ // We need this conversion on ARM64
+ return uint64(statT.Nlink), nil
+}
+
+func getInode(path string) (uint64, error) {
+ stat, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ statT, ok := stat.Sys().(*syscall.Stat_t)
+ if !ok {
+		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
+ }
+ return statT.Ino, nil
+}
+
+func TestTarWithBlockCharFifo(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
+ t.Fatal(err)
+ }
+
+ dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+
+ // we'll do this in two steps to separate failure
+ fh, err := Tar(origin, Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // ensure we can read the whole thing with no error, before writing back out
+ buf, err := ioutil.ReadAll(fh)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bRdr := bytes.NewReader(buf)
+ err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ changes, err := ChangesDirs(origin, dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(changes) > 0 {
+		t.Fatalf("Tar with special devices (block, char, fifo) should preserve them (recreating them on untar): %v", changes)
+ }
+}
+
+// TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows
+func TestTarUntarWithXattr(t *testing.T) {
+ origin, err := ioutil.TempDir("", "docker-test-untar-origin")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(origin)
+ if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, c := range []Compression{
+ Uncompressed,
+ Gzip,
+ } {
+ changes, err := tarUntar(t, origin, &TarOptions{
+ Compression: c,
+ ExcludePatterns: []string{"3"},
+ })
+
+ if err != nil {
+ t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
+ }
+
+ if len(changes) != 1 || changes[0].Path != "/3" {
+ t.Fatalf("Unexpected differences after tarUntar: %v", changes)
+ }
+ capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability")
+		if capability == nil || capability[0] != 0x00 {
+ t.Fatalf("Untar should have kept the 'security.capability' xattr.")
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
similarity index 62%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
rename to vendor/github.com/docker/docker/pkg/archive/archive_windows.go
index 10db4bd00ee..5c3a1be3401 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go
@@ -6,10 +6,25 @@ import (
"archive/tar"
"fmt"
"os"
+ "path/filepath"
"strings"
+
+ "github.com/docker/docker/pkg/longpath"
)
-// canonicalTarNameForPath returns platform-specific filepath
+// fixVolumePathPrefix does platform specific processing to ensure that if
+// the path being passed in is not in a volume path format, convert it to one.
+func fixVolumePathPrefix(srcPath string) string {
+ return longpath.AddPrefix(srcPath)
+}
+
+// getWalkRoot calculates the root path when performing a TarWithOptions.
+// We use a separate function as this is platform specific.
+func getWalkRoot(srcPath string, include string) string {
+ return filepath.Join(srcPath, include)
+}
+
+// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
@@ -34,7 +49,7 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm
}
-func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) {
+func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
@@ -48,3 +63,8 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
+
+func getFileUIDGID(stat interface{}) (int, int, error) {
+ // no notion of file ownership mapping yet on Windows
+ return 0, 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go
new file mode 100644
index 00000000000..0c6733d6bd1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows_test.go
@@ -0,0 +1,91 @@
+// +build windows
+
+package archive
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestCopyFileWithInvalidDest(t *testing.T) {
+ // TODO Windows: This is currently failing. Not sure what has
+ // recently changed in CopyWithTar as used to pass. Further investigation
+ // is required.
+ t.Skip("Currently fails")
+ folder, err := ioutil.TempDir("", "docker-archive-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(folder)
+ dest := "c:dest"
+ srcFolder := filepath.Join(folder, "src")
+ src := filepath.Join(folder, "src", "src")
+ err = os.MkdirAll(srcFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ err = CopyWithTar(src, dest)
+ if err == nil {
+ t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.")
+ }
+}
+
+func TestCanonicalTarNameForPath(t *testing.T) {
+ cases := []struct {
+ in, expected string
+ shouldFail bool
+ }{
+ {"foo", "foo", false},
+ {"foo/bar", "___", true}, // unix-styled windows path must fail
+ {`foo\bar`, "foo/bar", false},
+ }
+ for _, v := range cases {
+ if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if v.shouldFail && err == nil {
+ t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out)
+ } else if !v.shouldFail && out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestCanonicalTarName(t *testing.T) {
+ cases := []struct {
+ in string
+ isDir bool
+ expected string
+ }{
+ {"foo", false, "foo"},
+ {"foo", true, "foo/"},
+ {`foo\bar`, false, "foo/bar"},
+ {`foo\bar`, true, "foo/bar/"},
+ }
+ for _, v := range cases {
+ if out, err := canonicalTarName(v.in, v.isDir); err != nil {
+ t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err)
+ } else if out != v.expected {
+ t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out)
+ }
+ }
+}
+
+func TestChmodTarEntry(t *testing.T) {
+ cases := []struct {
+ in, expected os.FileMode
+ }{
+ {0000, 0111},
+ {0777, 0755},
+ {0644, 0755},
+ {0755, 0755},
+ {0444, 0555},
+ }
+ for _, v := range cases {
+ if out := chmodTarEntry(v.in); out != v.expected {
+ t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go
similarity index 78%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
rename to vendor/github.com/docker/docker/pkg/archive/changes.go
index c7838e8599e..4e2d8e54f7e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes.go
@@ -13,35 +13,47 @@ import (
"syscall"
"time"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
)
+// ChangeType represents the change type.
type ChangeType int
const (
+ // ChangeModify represents the modify operation.
ChangeModify = iota
+ // ChangeAdd represents the add operation.
ChangeAdd
+ // ChangeDelete represents the delete operation.
ChangeDelete
)
+func (c ChangeType) String() string {
+ switch c {
+ case ChangeModify:
+ return "C"
+ case ChangeAdd:
+ return "A"
+ case ChangeDelete:
+ return "D"
+ }
+ return ""
+}
+
+// Change represents a change; it wraps the change type and path.
+// It describes changes of the files in the path with respect to the
+// parent layers. The change could be modify, add, or delete.
+// This is used for layer diffs.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
- var kind string
- switch change.Kind {
- case ChangeModify:
- kind = "C"
- case ChangeAdd:
- kind = "A"
- case ChangeDelete:
- kind = "D"
- }
- return fmt.Sprintf("%s %s", kind, change.Path)
+ return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
@@ -69,6 +81,33 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
+}
+
+func aufsMetadataSkip(path string) (skip bool, err error) {
+ skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
+ if err != nil {
+ skip = true
+ }
+ return
+}
+
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ f := filepath.Base(path)
+
+ // If there is a whiteout, then the file was removed
+ if strings.HasPrefix(f, WhiteoutPrefix) {
+ originalFile := f[len(WhiteoutPrefix):]
+ return filepath.Join(filepath.Dir(path), originalFile), nil
+ }
+
+ return "", nil
+}
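For illustration, a standalone sketch of the whiteout convention aufsDeletedFile implements, assuming the usual AUFS prefix `.wh.` as the value of `WhiteoutPrefix` (the `deletedPath` name is hypothetical): stripping the prefix from the base name yields the path that was deleted in the layer.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh." // assumed value of WhiteoutPrefix

// deletedPath maps a whiteout entry to the path it deletes,
// or returns "" when the entry is not a whiteout.
func deletedPath(path string) string {
	base := filepath.Base(path)
	if strings.HasPrefix(base, whiteoutPrefix) {
		return filepath.Join(filepath.Dir(path), strings.TrimPrefix(base, whiteoutPrefix))
	}
	return ""
}

func main() {
	fmt.Println(deletedPath("/dir1/.wh.file1-2")) // "/dir1/file1-2"
	fmt.Println(deletedPath("/dir1/file1-1"))     // ""
}
```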
+
+type skipChange func(string) (bool, error)
+type deleteChange func(string, string, os.FileInfo) (string, error)
+
+func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@@ -93,21 +132,24 @@ func Changes(layers []string, rw string) ([]Change, error) {
return nil
}
- // Skip AUFS metadata
- if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched {
- return err
+ if sc != nil {
+ if skip, err := sc(path); skip {
+ return err
+ }
}
change := Change{
Path: path,
}
+ deletedFile, err := dc(rw, path, f)
+ if err != nil {
+ return err
+ }
+
// Find out what kind of modification happened
- file := filepath.Base(path)
- // If there is a whiteout, then the file was removed
- if strings.HasPrefix(file, ".wh.") {
- originalFile := file[len(".wh."):]
- change.Path = filepath.Join(filepath.Dir(path), originalFile)
+ if deletedFile != "" {
+ change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
@@ -138,7 +180,7 @@ func Changes(layers []string, rw string) ([]Change, error) {
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
- // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal.
+ // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
@@ -161,20 +203,22 @@ func Changes(layers []string, rw string) ([]Change, error) {
return changes, nil
}
+// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
- stat *system.Stat_t
+ stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
-func (root *FileInfo) LookUp(path string) *FileInfo {
+// LookUp looks up the file information of a file.
+func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
- parent := root
+ parent := info
if path == string(os.PathSeparator) {
- return root
+ return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
@@ -275,6 +319,7 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
}
+// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
@@ -316,13 +361,29 @@ func ChangesDirs(newDir, oldDir string) ([]Change, error) {
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
- var size int64
+ var (
+ size int64
+ sf = make(map[uint64]struct{})
+ )
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
- fileInfo, _ := os.Lstat(file)
+ fileInfo, err := os.Lstat(file)
+ if err != nil {
+				logrus.Errorf("Cannot stat %q: %s", file, err)
+ continue
+ }
+
if fileInfo != nil && !fileInfo.IsDir() {
- size += fileInfo.Size()
+ if hasHardlinks(fileInfo) {
+ inode := getIno(fileInfo)
+ if _, ok := sf[inode]; !ok {
+ size += fileInfo.Size()
+ sf[inode] = struct{}{}
+ }
+ } else {
+ size += fileInfo.Size()
+ }
}
}
}
@@ -330,13 +391,15 @@ func ChangesSize(newDir string, changes []Change) int64 {
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
-func ExportChanges(dir string, changes []Change) (Archive, error) {
+func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
+ UIDMaps: uidMaps,
+ GIDMaps: gidMaps,
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
@@ -351,7 +414,7 @@ func ExportChanges(dir string, changes []Change) (Archive, error) {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
- whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase)
+ whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
similarity index 90%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
rename to vendor/github.com/docker/docker/pkg/archive/changes_linux.go
index 378cc09c859..a4cc0c65d39 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go
@@ -9,7 +9,7 @@ import (
"syscall"
"unsafe"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/pkg/system"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
@@ -283,3 +283,30 @@ func clen(n []byte) int {
}
return len(n)
}
+
+// OverlayChanges walks the path rw and determines changes for the files in the path,
+// with respect to the parent layers
+func OverlayChanges(layers []string, rw string) ([]Change, error) {
+ return changes(layers, rw, overlayDeletedFile, nil)
+}
+
+func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+ if fi.Mode()&os.ModeCharDevice != 0 {
+ s := fi.Sys().(*syscall.Stat_t)
+ if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
+ return path, nil
+ }
+ }
+ if fi.Mode()&os.ModeDir != 0 {
+ opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
+ if err != nil {
+ return "", err
+ }
+ if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
+ return path, nil
+ }
+ }
+
+ return "", nil
+}
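For context, a standalone sketch (Linux-only, illustrative names and paths) of the first check overlayDeletedFile performs: an overlayfs whiteout is a character device whose device number is 0:0.

```go
package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

// isOverlayWhiteout reports whether path looks like an overlayfs
// whiteout, i.e. a character device with device number 0:0.
func isOverlayWhiteout(path string) (bool, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return false, err
	}
	if fi.Mode()&os.ModeCharDevice == 0 {
		return false, nil
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return false, nil
	}
	return st.Rdev == 0, nil // major 0 and minor 0
}

func main() {
	// Hypothetical layer path, used purely for illustration.
	ok, err := isOverlayWhiteout("/var/lib/docker/overlay/layer1/deleted-file")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ok)
}
```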
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
similarity index 96%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
rename to vendor/github.com/docker/docker/pkg/archive/changes_other.go
index 35832f087d0..da70ed37c45 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go
@@ -9,7 +9,7 @@ import (
"runtime"
"strings"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go
new file mode 100644
index 00000000000..5a3282b5a8a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_posix_test.go
@@ -0,0 +1,127 @@
+package archive
+
+import (
+ "archive/tar"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "sort"
+ "testing"
+)
+
+func TestHardLinkOrder(t *testing.T) {
+ names := []string{"file1.txt", "file2.txt", "file3.txt"}
+ msg := []byte("Hey y'all")
+
+ // Create dir
+ src, err := ioutil.TempDir("", "docker-hardlink-test-src-")
+ if err != nil {
+ t.Fatal(err)
+ }
+	defer os.RemoveAll(src)
+ for _, name := range names {
+ func() {
+ fh, err := os.Create(path.Join(src, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer fh.Close()
+ if _, err = fh.Write(msg); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ }
+ // Create dest, with changes that includes hardlinks
+ dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-")
+ if err != nil {
+ t.Fatal(err)
+ }
+ os.RemoveAll(dest) // we just want the name, at first
+ if err := copyDir(src, dest); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dest)
+ for _, name := range names {
+ for i := 0; i < 5; i++ {
+ if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ // get changes
+ changes, err := ChangesDirs(dest, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // sort
+ sort.Sort(changesByPath(changes))
+
+ // ExportChanges
+ ar, err := ExportChanges(dest, changes, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdrs, err := walkHeaders(ar)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // reverse sort
+ sort.Sort(sort.Reverse(changesByPath(changes)))
+ // ExportChanges
+ arRev, err := ExportChanges(dest, changes, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ hdrsRev, err := walkHeaders(arRev)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // line up the two sets
+ sort.Sort(tarHeaders(hdrs))
+ sort.Sort(tarHeaders(hdrsRev))
+
+ // compare Size and LinkName
+ for i := range hdrs {
+ if hdrs[i].Name != hdrsRev[i].Name {
+ t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name)
+ }
+ if hdrs[i].Size != hdrsRev[i].Size {
+ t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size)
+ }
+ if hdrs[i].Typeflag != hdrsRev[i].Typeflag {
+ t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag)
+ }
+ if hdrs[i].Linkname != hdrsRev[i].Linkname {
+ t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname)
+ }
+ }
+}
+
+type tarHeaders []tar.Header
+
+func (th tarHeaders) Len() int { return len(th) }
+func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] }
+func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name }
+
+func walkHeaders(r io.Reader) ([]tar.Header, error) {
+ t := tar.NewReader(r)
+ headers := []tar.Header{}
+ for {
+ hdr, err := t.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return headers, err
+ }
+ headers = append(headers, *hdr)
+ }
+ return headers, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_test.go b/vendor/github.com/docker/docker/pkg/archive/changes_test.go
new file mode 100644
index 00000000000..8a2d0e8b157
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_test.go
@@ -0,0 +1,565 @@
+package archive
+
+import (
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "runtime"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func max(x, y int) int {
+ if x >= y {
+ return x
+ }
+ return y
+}
+
+func copyDir(src, dst string) error {
+ cmd := exec.Command("cp", "-a", src, dst)
+ if err := cmd.Run(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type FileType uint32
+
+const (
+ Regular FileType = iota
+ Dir
+ Symlink
+)
+
+type FileData struct {
+ filetype FileType
+ path string
+ contents string
+ permissions os.FileMode
+}
+
+func createSampleDir(t *testing.T, root string) {
+ files := []FileData{
+ {Regular, "file1", "file1\n", 0600},
+ {Regular, "file2", "file2\n", 0666},
+ {Regular, "file3", "file3\n", 0404},
+ {Regular, "file4", "file4\n", 0600},
+ {Regular, "file5", "file5\n", 0600},
+ {Regular, "file6", "file6\n", 0600},
+ {Regular, "file7", "file7\n", 0600},
+ {Dir, "dir1", "", 0740},
+ {Regular, "dir1/file1-1", "file1-1\n", 01444},
+ {Regular, "dir1/file1-2", "file1-2\n", 0666},
+ {Dir, "dir2", "", 0700},
+ {Regular, "dir2/file2-1", "file2-1\n", 0666},
+ {Regular, "dir2/file2-2", "file2-2\n", 0666},
+ {Dir, "dir3", "", 0700},
+ {Regular, "dir3/file3-1", "file3-1\n", 0666},
+ {Regular, "dir3/file3-2", "file3-2\n", 0666},
+ {Dir, "dir4", "", 0700},
+ {Regular, "dir4/file3-1", "file4-1\n", 0666},
+ {Regular, "dir4/file3-2", "file4-2\n", 0666},
+ {Symlink, "symlink1", "target1", 0666},
+ {Symlink, "symlink2", "target2", 0666},
+ {Symlink, "symlink3", root + "/file1", 0666},
+ {Symlink, "symlink4", root + "/symlink3", 0666},
+ {Symlink, "dirSymlink", root + "/dir1", 0740},
+ }
+
+ now := time.Now()
+ for _, info := range files {
+ p := path.Join(root, info.path)
+ if info.filetype == Dir {
+ if err := os.MkdirAll(p, info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Regular {
+ if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
+ t.Fatal(err)
+ }
+ } else if info.filetype == Symlink {
+ if err := os.Symlink(info.contents, p); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if info.filetype != Symlink {
+ // Set a consistent ctime, atime for all files and dirs
+ if err := system.Chtimes(p, now, now); err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+}
+
+func TestChangeString(t *testing.T) {
+	modifyChange := Change{"change", ChangeModify}
+	toString := modifyChange.String()
+	if toString != "C change" {
+		t.Fatalf("String() of a change with ChangeModify Kind should have been %s but was %s", "C change", toString)
+ }
+ addChange := Change{"change", ChangeAdd}
+ toString = addChange.String()
+ if toString != "A change" {
+ t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString)
+ }
+ deleteChange := Change{"change", ChangeDelete}
+ toString = deleteChange.String()
+ if toString != "D change" {
+ t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString)
+ }
+}
+
+func TestChangesWithNoChanges(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+ rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(rwLayer)
+ layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+ createSampleDir(t, layer)
+ changes, err := Changes([]string{layer}, rwLayer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(changes) != 0 {
+		t.Fatalf("Changes with no difference should have detected no changes, but detected %d", len(changes))
+ }
+}
+
+func TestChangesWithChanges(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+ // Mock the readonly layer
+ layer, err := ioutil.TempDir("", "docker-changes-test-layer")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(layer)
+ createSampleDir(t, layer)
+ os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
+
+ // Mock the RW layer
+ rwLayer, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(rwLayer)
+
+ // Create a folder in RW layer
+ dir1 := path.Join(rwLayer, "dir1")
+ os.MkdirAll(dir1, 0740)
+ deletedFile := path.Join(dir1, ".wh.file1-2")
+ ioutil.WriteFile(deletedFile, []byte{}, 0600)
+ modifiedFile := path.Join(dir1, "file1-1")
+ ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444)
+ // Let's add a subfolder for a newFile
+ subfolder := path.Join(dir1, "subfolder")
+ os.MkdirAll(subfolder, 0740)
+ newFile := path.Join(subfolder, "newFile")
+ ioutil.WriteFile(newFile, []byte{}, 0740)
+
+ changes, err := Changes([]string{layer}, rwLayer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges := []Change{
+ {"/dir1", ChangeModify},
+ {"/dir1/file1-1", ChangeModify},
+ {"/dir1/file1-2", ChangeDelete},
+ {"/dir1/subfolder", ChangeModify},
+ {"/dir1/subfolder/newFile", ChangeAdd},
+ }
+ checkChanges(expectedChanges, changes, t)
+}
+
+// See https://github.com/docker/docker/pull/13590
+func TestChangesWithChangesGH13590(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+	baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(baseLayer)
+
+ dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
+	os.MkdirAll(dir3, 0740)
+
+ file := path.Join(dir3, "file.txt")
+ ioutil.WriteFile(file, []byte("hello"), 0666)
+
+	layer, err := ioutil.TempDir("", "docker-changes-test2.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+
+ // Test creating a new file
+ if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+ t.Fatalf("Cmd failed: %q", err)
+ }
+
+ os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
+ file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
+ ioutil.WriteFile(file, []byte("bye"), 0666)
+
+ changes, err := Changes([]string{baseLayer}, layer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges := []Change{
+ {"/dir1/dir2/dir3", ChangeModify},
+ {"/dir1/dir2/dir3/file1.txt", ChangeAdd},
+ }
+ checkChanges(expectedChanges, changes, t)
+
+ // Now test changing a file
+	layer, err = ioutil.TempDir("", "docker-changes-test3.")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(layer)
+
+ if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
+ t.Fatalf("Cmd failed: %q", err)
+ }
+
+ file = path.Join(layer, "dir1/dir2/dir3/file.txt")
+ ioutil.WriteFile(file, []byte("bye"), 0666)
+
+ changes, err = Changes([]string{baseLayer}, layer)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedChanges = []Change{
+ {"/dir1/dir2/dir3/file.txt", ChangeModify},
+ }
+ checkChanges(expectedChanges, changes, t)
+}
+
+// Create a directory, copy it, make sure we report no changes between the two
+func TestChangesDirsEmpty(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dst)
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes) != 0 {
+ t.Fatalf("Reported changes for identical dirs: %v", changes)
+ }
+ os.RemoveAll(src)
+ os.RemoveAll(dst)
+}
+
+func mutateSampleDir(t *testing.T, root string) {
+ // Remove a regular file
+ if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a directory
+ if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Rewrite a file
+ if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace a file
+ if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch file
+ if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace file with dir
+ if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new file
+ if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create new dir
+ if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
+ t.Fatal(err)
+ }
+
+ // Create a new symlink
+ if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Change a symlink
+ if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
+ t.Fatal(err)
+ }
+
+ // Replace dir with file
+ if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // Touch dir
+ if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChangesDirsMutated(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(src)
+ defer os.RemoveAll(dst)
+
+ mutateSampleDir(t, dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ sort.Sort(changesByPath(changes))
+
+ expectedChanges := []Change{
+ {"/dir1", ChangeDelete},
+ {"/dir2", ChangeModify},
+ {"/dirnew", ChangeAdd},
+ {"/file1", ChangeDelete},
+ {"/file2", ChangeModify},
+ {"/file3", ChangeModify},
+ {"/file4", ChangeModify},
+ {"/file5", ChangeModify},
+ {"/filenew", ChangeAdd},
+ {"/symlink1", ChangeDelete},
+ {"/symlink2", ChangeModify},
+ {"/symlinknew", ChangeAdd},
+ }
+
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+ if i >= len(expectedChanges) {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ }
+ if i >= len(changes) {
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+ }
+ if changes[i].Path == expectedChanges[i].Path {
+ if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
+ }
+ } else if changes[i].Path < expectedChanges[i].Path {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ } else {
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+ }
+ }
+}
+
+func TestApplyLayer(t *testing.T) {
+ // TODO Windows. There may be a way of running this, but turning off for now
+ // as createSampleDir uses symlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("symlinks on Windows")
+ }
+ src, err := ioutil.TempDir("", "docker-changes-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ createSampleDir(t, src)
+ defer os.RemoveAll(src)
+ dst := src + "-copy"
+ if err := copyDir(src, dst); err != nil {
+ t.Fatal(err)
+ }
+ mutateSampleDir(t, dst)
+ defer os.RemoveAll(dst)
+
+ changes, err := ChangesDirs(dst, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layer, err := ExportChanges(dst, changes, nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ layerCopy, err := NewTempArchive(layer, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := ApplyLayer(src, layerCopy); err != nil {
+ t.Fatal(err)
+ }
+
+ changes2, err := ChangesDirs(src, dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(changes2) != 0 {
+ t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2)
+ }
+}
+
+func TestChangesSizeWithHardlinks(t *testing.T) {
+	// TODO Windows. There may be a way of running this, but turning off for now
+	// as this test relies on hardlinks.
+ if runtime.GOOS == "windows" {
+ t.Skip("hardlinks on Windows")
+ }
+ srcDir, err := ioutil.TempDir("", "docker-test-srcDir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(srcDir)
+
+ destDir, err := ioutil.TempDir("", "docker-test-destDir")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(destDir)
+
+ creationSize, err := prepareUntarSourceDirectory(100, destDir, true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ changes, err := ChangesDirs(destDir, srcDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ got := ChangesSize(destDir, changes)
+ if got != int64(creationSize) {
+ t.Errorf("Expected %d bytes of changes, got %d", creationSize, got)
+ }
+}
+
+func TestChangesSizeWithNoChanges(t *testing.T) {
+ size := ChangesSize("/tmp", nil)
+ if size != 0 {
+		t.Fatalf("ChangesSize with no changes should be 0, was %d", size)
+ }
+}
+
+func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) {
+ changes := []Change{
+ {Path: "deletedPath", Kind: ChangeDelete},
+ }
+ size := ChangesSize("/tmp", changes)
+ if size != 0 {
+		t.Fatalf("ChangesSize with only delete changes should be 0, was %d", size)
+ }
+}
+
+func TestChangesSize(t *testing.T) {
+	parentPath, err := ioutil.TempDir("", "docker-changes-test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(parentPath)
+ addition := path.Join(parentPath, "addition")
+ if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+ t.Fatal(err)
+ }
+ modification := path.Join(parentPath, "modification")
+ if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil {
+ t.Fatal(err)
+ }
+ changes := []Change{
+ {Path: "addition", Kind: ChangeAdd},
+ {Path: "modification", Kind: ChangeModify},
+ }
+ size := ChangesSize(parentPath, changes)
+ if size != 6 {
+ t.Fatalf("Expected 6 bytes of changes, got %d", size)
+ }
+}
+
+func checkChanges(expectedChanges, changes []Change, t *testing.T) {
+ sort.Sort(changesByPath(expectedChanges))
+ sort.Sort(changesByPath(changes))
+ for i := 0; i < max(len(changes), len(expectedChanges)); i++ {
+ if i >= len(expectedChanges) {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ }
+ if i >= len(changes) {
+ t.Fatalf("no change for expected change %s\n", expectedChanges[i].String())
+ }
+ if changes[i].Path == expectedChanges[i].Path {
+ if changes[i] != expectedChanges[i] {
+				t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, expectedChanges[i].String(), changes[i].String())
+ }
+ } else if changes[i].Path < expectedChanges[i].Path {
+ t.Fatalf("unexpected change %s\n", changes[i].String())
+ } else {
+ t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String())
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
similarity index 60%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
rename to vendor/github.com/docker/docker/pkg/archive/changes_unix.go
index dc1ea608bef..3778b732cf4 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go
@@ -3,16 +3,17 @@
package archive
import (
+ "os"
"syscall"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/docker/docker/pkg/system"
)
-func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
- oldStat.Uid() != newStat.Uid() ||
- oldStat.Gid() != newStat.Gid() ||
+ oldStat.UID() != newStat.UID() ||
+ oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, its not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
@@ -25,3 +26,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
+
+func getIno(fi os.FileInfo) uint64 {
+ return uint64(fi.Sys().(*syscall.Stat_t).Ino)
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return fi.Sys().(*syscall.Stat_t).Nlink > 1
+}
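These two helpers are what allows ChangesSize (in changes.go above) to count a group of hardlinked files only once. A standalone sketch of that accounting idea, Unix-only and with illustrative names:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

// totalSize sums file sizes, counting each inode once so that
// hardlinked files do not inflate the result.
func totalSize(paths []string) (int64, error) {
	seen := make(map[uint64]struct{})
	var size int64
	for _, p := range paths {
		fi, err := os.Lstat(p)
		if err != nil {
			return 0, err
		}
		if fi.IsDir() {
			continue
		}
		st := fi.Sys().(*syscall.Stat_t)
		if st.Nlink > 1 {
			if _, ok := seen[st.Ino]; ok {
				continue // already counted via another link
			}
			seen[st.Ino] = struct{}{}
		}
		size += fi.Size()
	}
	return size, nil
}

func main() {
	n, err := totalSize([]string{"/etc/hostname"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n)
}
```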
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
similarity index 60%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
rename to vendor/github.com/docker/docker/pkg/archive/changes_windows.go
index 6026575e5c8..af94243fc4b 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go
+++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go
@@ -1,10 +1,12 @@
package archive
import (
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "os"
+
+ "github.com/docker/docker/pkg/system"
)
-func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
+func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.ModTime() != newStat.ModTime() ||
@@ -18,3 +20,11 @@ func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool {
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.IsDir()
}
+
+func getIno(fi os.FileInfo) (inode uint64) {
+ return
+}
+
+func hasHardlinks(fi os.FileInfo) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go
new file mode 100644
index 00000000000..a60c948d0dd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy.go
@@ -0,0 +1,458 @@
+package archive
+
+import (
+ "archive/tar"
+ "errors"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/system"
+)
+
+// Errors used or returned by the copy functions in this file.
+var (
+ ErrNotDirectory = errors.New("not a directory")
+ ErrDirNotExists = errors.New("no such directory")
+ ErrCannotCopyDir = errors.New("cannot copy directory")
+ ErrInvalidCopySource = errors.New("invalid copy source content")
+)
+
+// PreserveTrailingDotOrSeparator returns the given cleaned path (after
+// processing using any utility functions from the path or filepath stdlib
+// packages) and appends a trailing `/.` or `/` if its corresponding original
+// path (from before being processed by utility functions from the path or
+// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
+// path already ends in a `.` path segment, then another is not added. If the
+// clean path already ends in a path separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+ // Ensure paths are in platform semantics
+ cleanedPath = normalizePath(cleanedPath)
+ originalPath = normalizePath(originalPath)
+
+ if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
+ if !hasTrailingPathSeparator(cleanedPath) {
+ // Add a separator if it doesn't already end with one (a cleaned
+ // path would only end in a separator if it is the root).
+ cleanedPath += string(filepath.Separator)
+ }
+ cleanedPath += "."
+ }
+
+ if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
+ cleanedPath += string(filepath.Separator)
+ }
+
+ return cleanedPath
+}
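A few concrete input/output pairs for the function above (a sketch assuming Unix path semantics; the first argument is what `filepath.Clean` would produce from the second):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar"))   // /foo/bar
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/"))  // /foo/bar/
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/foo/bar", "/foo/bar/.")) // /foo/bar/.
	fmt.Println(archive.PreserveTrailingDotOrSeparator("/", "/"))                 // / (root already ends in a separator)
}
```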
+
+// assertsDirectory returns whether the given path is
+// asserted to be a directory, i.e., the path ends with
+// a trailing '/' or `/.`, assuming a path separator of `/`.
+func assertsDirectory(path string) bool {
+ return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+}
+
+// hasTrailingPathSeparator returns whether the given
+// path ends with the system's path separator character.
+func hasTrailingPathSeparator(path string) bool {
+ return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+}
+
+// specifiesCurrentDir returns whether the given path specifies
+// a "current directory", i.e., the last path segment is `.`.
+func specifiesCurrentDir(path string) bool {
+ return filepath.Base(path) == "."
+}
+
+// SplitPathDirEntry splits the given path between its directory name and its
+// basename by first cleaning the path but preserves a trailing "." if the
+// original path specified the current directory.
+func SplitPathDirEntry(path string) (dir, base string) {
+ cleanedPath := filepath.Clean(normalizePath(path))
+
+ if specifiesCurrentDir(path) {
+ cleanedPath += string(filepath.Separator) + "."
+ }
+
+ return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
+}
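Likewise for SplitPathDirEntry, a short usage sketch (Unix semantics assumed) showing how a trailing `.` survives the split while a bare trailing separator does not:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	fmt.Println(archive.SplitPathDirEntry("/foo/bar"))   // "/foo" "bar"
	fmt.Println(archive.SplitPathDirEntry("/foo/bar/"))  // "/foo" "bar"
	fmt.Println(archive.SplitPathDirEntry("/foo/bar/.")) // "/foo/bar" "."
}
```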
+
+// TarResource archives the resource described by the given CopyInfo to a Tar
+// archive. A non-nil error is returned if sourcePath does not exist or is
+// asserted to be a directory but exists as another type of file.
+//
+// This function acts as a convenient wrapper around TarWithOptions, which
+// requires a directory as the source path. TarResource accepts either a
+// directory or a file path and correctly sets the Tar options.
+func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
+ return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
+ sourcePath = normalizePath(sourcePath)
+ if _, err = os.Lstat(sourcePath); err != nil {
+ // Catches the case where the source does not exist or is not a
+ // directory if asserted to be a directory, as this also causes an
+ // error.
+ return
+ }
+
+ // Separate the source path between its directory and
+ // the entry in that directory which we are archiving.
+ sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+
+ filter := []string{sourceBase}
+
+ logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+
+ return TarWithOptions(sourceDir, &TarOptions{
+ Compression: Uncompressed,
+ IncludeFiles: filter,
+ IncludeSourceDir: true,
+ RebaseNames: map[string]string{
+ sourceBase: rebaseName,
+ },
+ })
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+ Path string
+ Exists bool
+ IsDir bool
+ RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
+	// Normalize the file path and then evaluate the symlink.
+	// If followLink is set, we use the link target instead of
+	// the symlink itself.
+ path = normalizePath(path)
+
+ resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ stat, err := os.Lstat(resolvedPath)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ return CopyInfo{
+ Path: resolvedPath,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ RebaseName: rebaseName,
+ }, nil
+}
+
+// CopyInfoDestinationPath stats the given path to create a CopyInfo
+// struct representing that resource for the destination of an archive copy
+// operation. The given path should be an absolute local path.
+func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
+ maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
+ path = normalizePath(path)
+ originalPath := path
+
+ stat, err := os.Lstat(path)
+
+ if err == nil && stat.Mode()&os.ModeSymlink == 0 {
+ // The path exists and is not a symlink.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+ }
+
+ // While the path is a symlink.
+ for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
+ if n > maxSymlinkIter {
+ // Don't follow symlinks more than this arbitrary number of times.
+ return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
+ }
+
+ // The path is a symbolic link. We need to evaluate it so that the
+ // destination of the copy operation is the link target and not the
+		// link itself. This is notably different from CopyInfoSourcePath, which
+ // only evaluates symlinks before the last appearing path separator.
+ // Also note that it is okay if the last path element is a broken
+ // symlink as the copy operation should create the target.
+ var linkTarget string
+
+ linkTarget, err = os.Readlink(path)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+
+ if !system.IsAbs(linkTarget) {
+ // Join with the parent directory.
+ dstParent, _ := SplitPathDirEntry(path)
+ linkTarget = filepath.Join(dstParent, linkTarget)
+ }
+
+ path = linkTarget
+ stat, err = os.Lstat(path)
+ }
+
+ if err != nil {
+ // It's okay if the destination path doesn't exist. We can still
+ // continue the copy operation if the parent directory exists.
+ if !os.IsNotExist(err) {
+ return CopyInfo{}, err
+ }
+
+ // Ensure destination parent dir exists.
+ dstParent, _ := SplitPathDirEntry(path)
+
+ parentDirStat, err := os.Lstat(dstParent)
+ if err != nil {
+ return CopyInfo{}, err
+ }
+ if !parentDirStat.IsDir() {
+ return CopyInfo{}, ErrNotDirectory
+ }
+
+ return CopyInfo{Path: path}, nil
+ }
+
+ // The path exists after resolving symlinks.
+ return CopyInfo{
+ Path: path,
+ Exists: true,
+ IsDir: stat.IsDir(),
+ }, nil
+}
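+
+// destinationExample is an illustrative sketch (not part of the
+// original file): the destination may be a dangling symlink, in which
+// case the returned CopyInfo has Exists == false and Path set to the
+// link target so that the copy operation can create it.
+func destinationExample() (CopyInfo, error) {
+	// "/tmp/dangling" is a hypothetical symlink whose target does not
+	// exist yet; the target's parent directory must exist, otherwise
+	// an error is returned instead.
+	return CopyInfoDestinationPath("/tmp/dangling")
+}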
+
+// PrepareArchiveCopy prepares the given srcContent archive, which should
+// contain the archived resource described by srcInfo, to the destination
+// described by dstInfo. Returns the possibly modified content archive along
+// with the path to the destination directory which it should be extracted to.
+func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
+ // Ensure in platform semantics
+ srcInfo.Path = normalizePath(srcInfo.Path)
+ dstInfo.Path = normalizePath(dstInfo.Path)
+
+ // Separate the destination path between its directory and base
+ // components in case the source archive contents need to be rebased.
+ dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
+ _, srcBase := SplitPathDirEntry(srcInfo.Path)
+
+ switch {
+ case dstInfo.Exists && dstInfo.IsDir:
+ // The destination exists as a directory. No alteration
+ // to srcContent is needed as its contents can be
+ // simply extracted to the destination directory.
+ return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+ case dstInfo.Exists && srcInfo.IsDir:
+ // The destination exists as some type of file and the source
+ // content is a directory. This is an error condition since
+ // you cannot copy a directory to an existing file location.
+ return "", nil, ErrCannotCopyDir
+ case dstInfo.Exists:
+ // The destination exists as some type of file and the source content
+ // is also a file. The source content entry will have to be renamed to
+ // have a basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case srcInfo.IsDir:
+ // The destination does not exist and the source content is an archive
+ // of a directory. The archive should be extracted to the parent of
+ // the destination path instead, and when it is, the directory that is
+ // created as a result should take the name of the destination path.
+ // The source content entries will have to be renamed to have a
+ // basename which matches the destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ case assertsDirectory(dstInfo.Path):
+ // The destination does not exist and is asserted to be created as a
+ // directory, but the source content is not a directory. This is an
+ // error condition since you cannot create a directory from a file
+ // source.
+ return "", nil, ErrDirNotExists
+ default:
+ // The last remaining case is when the destination does not exist, is
+ // not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+ // to be created when the archive is extracted and the source content
+ // entry will have to be renamed to have a basename which matches the
+ // destination path's basename.
+ if len(srcInfo.RebaseName) != 0 {
+ srcBase = srcInfo.RebaseName
+ }
+ return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+ }
+}
+
+// RebaseArchiveEntries rewrites the given srcContent archive replacing
+// an occurrence of oldBase with newBase at the beginning of entry names.
+func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
+ if oldBase == string(os.PathSeparator) {
+ // If oldBase specifies the root directory, use an empty string as
+ // oldBase instead so that newBase doesn't replace the path separator
+ // that all paths will start with.
+ oldBase = ""
+ }
+
+ rebased, w := io.Pipe()
+
+ go func() {
+ srcTar := tar.NewReader(srcContent)
+ rebasedTar := tar.NewWriter(w)
+
+ for {
+ hdr, err := srcTar.Next()
+ if err == io.EOF {
+ // Signals end of archive.
+ rebasedTar.Close()
+ w.Close()
+ return
+ }
+ if err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
+
+ if err = rebasedTar.WriteHeader(hdr); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+
+ if _, err = io.Copy(rebasedTar, srcTar); err != nil {
+ w.CloseWithError(err)
+ return
+ }
+ }
+ }()
+
+ return rebased
+}
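+
+// rebaseExample is an illustrative sketch (not part of the original
+// file): entries such as "dir1/file1" in src are rewritten on the fly
+// to "dir2/file1" without buffering the whole archive in memory.
+func rebaseExample(src Reader) Archive {
+	// Only the first occurrence of "dir1" in each entry name is
+	// replaced, so "dir1/dir1/x" becomes "dir2/dir1/x".
+	return RebaseArchiveEntries(src, "dir1", "dir2")
+}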
+
+// CopyResource performs an archive copy from the given source path to the
+// given destination path. The source path MUST exist and the destination
+// path's parent directory must exist.
+func CopyResource(srcPath, dstPath string, followLink bool) error {
+ var (
+ srcInfo CopyInfo
+ err error
+ )
+
+ // Ensure in platform semantics
+ srcPath = normalizePath(srcPath)
+ dstPath = normalizePath(dstPath)
+
+ // Clean the source and destination paths.
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
+ dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+
+ if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
+ return err
+ }
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ return err
+ }
+ defer content.Close()
+
+ return CopyTo(content, srcInfo, dstPath)
+}
+
+// CopyTo handles extracting the given content whose
+// entries should be sourced from srcInfo to dstPath.
+func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
+ // The destination path need not exist, but CopyInfoDestinationPath will
+ // ensure that at least the parent directory exists.
+ dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
+ if err != nil {
+ return err
+ }
+
+ dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
+ if err != nil {
+ return err
+ }
+ defer copyArchive.Close()
+
+ options := &TarOptions{
+ NoLchown: true,
+ NoOverwriteDirNonDir: true,
+ }
+
+ return Untar(copyArchive, dstDir, options)
+}
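+
+// manualCopyExample is an illustrative sketch (not part of the
+// original file): it reproduces the core of CopyResource by hand,
+// showing how the source CopyInfo, the tar stream, and the extraction
+// step fit together.
+func manualCopyExample(srcPath, dstPath string) error {
+	// Stat the source without following a symlink in its last element.
+	srcInfo, err := CopyInfoSourcePath(normalizePath(srcPath), false)
+	if err != nil {
+		return err
+	}
+	// Archive the source resource...
+	content, err := TarResource(srcInfo)
+	if err != nil {
+		return err
+	}
+	defer content.Close()
+	// ...and extract it at the destination.
+	return CopyTo(content, srcInfo, dstPath)
+}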
+
+// ResolveHostSourcePath decides which real path needs to be copied,
+// depending on whether symlinks should be followed: if followLink is
+// true, resolvedPath is the link target of any symlinked file;
+// otherwise only symlinks in the parent directory are resolved, and a
+// symlinked file itself is returned without being resolved.
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
+ if followLink {
+ resolvedPath, err = filepath.EvalSymlinks(path)
+ if err != nil {
+ return
+ }
+
+ resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
+ } else {
+ dirPath, basePath := filepath.Split(path)
+
+		// If not following symlinks, resolve only the symlinks in the parent dir.
+ var resolvedDirPath string
+ resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
+ if err != nil {
+ return
+ }
+ // resolvedDirPath will have been cleaned (no trailing path separators) so
+ // we can manually join it with the base path element.
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
+ if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+ rebaseName = filepath.Base(path)
+ }
+ }
+ return resolvedPath, rebaseName, nil
+}
+
+// GetRebaseName normalizes and compares path and resolvedPath and
+// returns the completed resolved path and the rebased file name.
+func GetRebaseName(path, resolvedPath string) (string, string) {
+	// resolvedPath will have been cleaned (no trailing path separator or
+	// trailing dot) so we can manually re-append them.
+ var rebaseName string
+ if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+ resolvedPath += string(filepath.Separator) + "."
+ }
+
+ if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+ resolvedPath += string(filepath.Separator)
+ }
+
+ if filepath.Base(path) != filepath.Base(resolvedPath) {
+ // In the case where the path had a trailing separator and a symlink
+ // evaluation has changed the last path component, we will need to
+ // rebase the name in the archive that is being copied to match the
+ // originally requested name.
+ rebaseName = filepath.Base(path)
+ }
+ return resolvedPath, rebaseName
+}
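+
+// getRebaseNameExample is an illustrative sketch (not part of the
+// original file): if "/a/link" is a symlink that filepath.EvalSymlinks
+// resolved to "/a/real", archive entries must be rebased back to the
+// originally requested name "link".
+func getRebaseNameExample() (string, string) {
+	// Hypothetical values; returns ("/a/real", "link") because the
+	// base names differ after symlink evaluation.
+	return GetRebaseName("/a/link", "/a/real")
+}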
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go
rename to vendor/github.com/docker/docker/pkg/archive/copy_unix.go
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go
new file mode 100644
index 00000000000..ecbfc172b01
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix_test.go
@@ -0,0 +1,978 @@
+// +build !windows
+
+// TODO Windows: Some of these tests may be salvageable and portable to Windows.
+
+package archive
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+func removeAllPaths(paths ...string) {
+ for _, path := range paths {
+ os.RemoveAll(path)
+ }
+}
+
+func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) {
+ var err error
+
+ if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil {
+ t.Fatal(err)
+ }
+
+ return
+}
+
+func isNotDir(err error) bool {
+ return strings.Contains(err.Error(), "not a directory")
+}
+
+func joinTrailingSep(pathElements ...string) string {
+ joined := filepath.Join(pathElements...)
+
+ return fmt.Sprintf("%s%c", joined, filepath.Separator)
+}
+
+func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) {
+ t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB)
+
+ fileA, err := os.Open(filenameA)
+ if err != nil {
+ return
+ }
+ defer fileA.Close()
+
+ fileB, err := os.Open(filenameB)
+ if err != nil {
+ return
+ }
+ defer fileB.Close()
+
+ hasher := sha256.New()
+
+ if _, err = io.Copy(hasher, fileA); err != nil {
+ return
+ }
+
+ hashA := hasher.Sum(nil)
+ hasher.Reset()
+
+ if _, err = io.Copy(hasher, fileB); err != nil {
+ return
+ }
+
+ hashB := hasher.Sum(nil)
+
+ if !bytes.Equal(hashA, hashB) {
+ err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB))
+ }
+
+ return
+}
+
+func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) {
+ t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir)
+
+ var changes []Change
+
+ if changes, err = ChangesDirs(newDir, oldDir); err != nil {
+ return
+ }
+
+ if len(changes) != 0 {
+ err = fmt.Errorf("expected no changes between directories, but got: %v", changes)
+ }
+
+ return
+}
+
+func logDirContents(t *testing.T, dirPath string) {
+ logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ t.Errorf("stat error for path %q: %s", path, err)
+ return nil
+ }
+
+ if info.IsDir() {
+ path = joinTrailingSep(path)
+ }
+
+ t.Logf("\t%s", path)
+
+ return nil
+ })
+
+ t.Logf("logging directory contents: %q", dirPath)
+
+ if err := filepath.Walk(dirPath, logWalkedPaths); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) {
+	t.Logf("copying from %q to %q (not following symlinks)", srcPath, dstPath)
+
+ return CopyResource(srcPath, dstPath, false)
+}
+
+func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) {
+	t.Logf("copying from %q to %q (following symlinks)", srcPath, dstPath)
+
+ return CopyResource(srcPath, dstPath, true)
+}
+
+// Basic assumptions about SRC and DST:
+// 1. SRC must exist.
+// 2. If SRC ends with a trailing separator, it must be a directory.
+// 3. DST parent directory must exist.
+// 4. If DST exists as a file, it must not end with a trailing separator.
+
+// First get these easy error cases out of the way.
+
+// Test for error when SRC does not exist.
+func TestCopyErrSrcNotExists(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when SRC ends in a trailing
+// path separator but it exists as a file.
+func TestCopyErrSrcNotDir(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when SRC is a valid file or directory,
+// but the DST parent directory does not exist.
+func TestCopyErrDstParentNotExists(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
+
+ // Try with a file source.
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ // Copy to a file whose parent does not exist.
+ if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil {
+ t.Fatal("expected IsNotExist error, but got nil instead")
+ }
+
+ if !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+
+ // Try with a directory source.
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
+
+ content, err = TarResource(srcInfo)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ // Copy to a directory whose parent does not exist.
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil {
+ t.Fatal("expected IsNotExist error, but got nil instead")
+ }
+
+ if !os.IsNotExist(err) {
+ t.Fatalf("expected IsNotExist error, but got %T: %s", err, err)
+ }
+}
+
+// Test for error when DST ends in a trailing
+// path separator but exists as a file.
+func TestCopyErrDstNotDir(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ // Try with a file source.
+ srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false}
+
+ content, err := TarResource(srcInfo)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
+ t.Fatal("expected IsNotDir error, but got nil instead")
+ }
+
+ if !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+
+ // Try with a directory source.
+ srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true}
+
+ content, err = TarResource(srcInfo)
+ if err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+ defer content.Close()
+
+ if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil {
+ t.Fatal("expected IsNotDir error, but got nil instead")
+ }
+
+ if !isNotDir(err) {
+ t.Fatalf("expected IsNotDir error, but got %T: %s", err, err)
+ }
+}
+
+// Possibilities are reduced to the remaining 10 cases:
+//
+// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action
+// ===================================================================================================
+// A | no | - | no | - | no | create file
+// B | no | - | no | - | yes | error
+// C | no | - | yes | no | - | overwrite file
+// D | no | - | yes | yes | - | create file in dst dir
+// E | yes | no | no | - | - | create dir, copy contents
+// F | yes | no | yes | no | - | error
+// G | yes | no | yes | yes | - | copy dir and contents
+// H | yes | yes | no | - | - | create dir, copy contents
+// I | yes | yes | yes | no | - | error
+// J | yes | yes | yes | yes | - | copy dir contents
+//
+
+// A. SRC specifies a file and DST (no trailing path separator) doesn't
+// exist. This should create a file with the name DST and copy the
+// contents of the source file into it.
+func TestCopyCaseA(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstPath := filepath.Join(tmpDirB, "itWorks.txt")
+
+ var err error
+
+ if err = testCopyHelper(t, srcPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+ os.Remove(dstPath)
+
+ symlinkPath := filepath.Join(tmpDirA, "symlink3")
+ symlinkPath1 := filepath.Join(tmpDirA, "symlink4")
+ linkTarget := filepath.Join(tmpDirA, "file1")
+
+ if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+ t.Fatal(err)
+ }
+ os.Remove(dstPath)
+ if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// B. SRC specifies a file and DST (with trailing path separator) doesn't
+// exist. This should cause an error because the copy operation cannot
+// create a directory when copying a single file.
+func TestCopyCaseB(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstDir := joinTrailingSep(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcPath, dstDir); err == nil {
+ t.Fatal("expected ErrDirNotExists error, but got nil instead")
+ }
+
+ if err != ErrDirNotExists {
+ t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
+ }
+
+ symlinkPath := filepath.Join(tmpDirA, "symlink3")
+
+ if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil {
+ t.Fatal("expected ErrDirNotExists error, but got nil instead")
+ }
+ if err != ErrDirNotExists {
+ t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err)
+ }
+}
+
+// C. SRC specifies a file and DST exists as a file. This should overwrite
+// the file at DST with the contents of the source file.
+func TestCopyCaseC(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstPath := filepath.Join(tmpDirB, "file2")
+
+ var err error
+
+ // Ensure they start out different.
+ if err = fileContentsEqual(t, srcPath, dstPath); err == nil {
+ t.Fatal("expected different file contents")
+ }
+
+ if err = testCopyHelper(t, srcPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// C. Symlink-following version:
+// SRC specifies a file and DST exists as a file. This should overwrite
+// the file at DST with the contents of the source file.
+func TestCopyCaseCFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ symlinkPathBad := filepath.Join(tmpDirA, "symlink1")
+ symlinkPath := filepath.Join(tmpDirA, "symlink3")
+ linkTarget := filepath.Join(tmpDirA, "file1")
+ dstPath := filepath.Join(tmpDirB, "file2")
+
+ var err error
+
+	// First test a broken symlink: copying while following links must fail.
+	if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil {
+		t.Fatal("expected an error copying a broken symlink, but got nil instead")
+	}
+
+	// Test symlink -> symlink -> target.
+ // Ensure they start out different.
+ if err = fileContentsEqual(t, linkTarget, dstPath); err == nil {
+ t.Fatal("expected different file contents")
+ }
+
+ if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// D. SRC specifies a file and DST exists as a directory. This should place
+// a copy of the source file inside it using the basename from SRC. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseD(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcPath := filepath.Join(tmpDirA, "file1")
+ dstDir := filepath.Join(tmpDirB, "dir1")
+ dstPath := filepath.Join(dstDir, "file1")
+
+ var err error
+
+ // Ensure that dstPath doesn't exist.
+ if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
+ t.Fatalf("did not expect dstPath %q to exist", dstPath)
+ }
+
+ if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir1")
+
+ if err = testCopyHelper(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, srcPath, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// D. Symlink-following version:
+// SRC specifies a file and DST exists as a directory. This should place
+// a copy of the source file inside it using the basename from SRC. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseDFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcPath := filepath.Join(tmpDirA, "symlink4")
+ linkTarget := filepath.Join(tmpDirA, "file1")
+ dstDir := filepath.Join(tmpDirB, "dir1")
+ dstPath := filepath.Join(dstDir, "symlink4")
+
+ var err error
+
+ // Ensure that dstPath doesn't exist.
+ if _, err = os.Stat(dstPath); !os.IsNotExist(err) {
+ t.Fatalf("did not expect dstPath %q to exist", dstPath)
+ }
+
+ if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir1")
+
+ if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = fileContentsEqual(t, linkTarget, dstPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// E. SRC specifies a directory and DST does not exist. This should create a
+// directory at DST and copy the contents of the SRC directory into the DST
+// directory. Ensure this works whether DST has a trailing path separator or
+// not.
+func TestCopyCaseE(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// E. Symlink-following version:
+// SRC specifies a directory and DST does not exist. This should create a
+// directory at DST and copy the contents of the SRC directory into the DST
+// directory. Ensure this works whether DST has a trailing path separator or
+// not.
+func TestCopyCaseEFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := filepath.Join(tmpDirA, "dirSymlink")
+ linkTarget := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// F. SRC specifies a directory and DST exists as a file. This should cause an
+// error as it is not possible to overwrite a file with a directory.
+func TestCopyCaseF(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ symSrcDir := filepath.Join(tmpDirA, "dirSymlink")
+ dstFile := filepath.Join(tmpDirB, "file1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+
+	// Now test with a symlink source.
+ if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+}
+
+// G. SRC specifies a directory and DST exists as a directory. This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseG(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "dir2")
+ resultDir := filepath.Join(dstDir, "dir1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// G. Symlink-following version:
+// SRC specifies a directory and DST exists as a directory. This should copy
+// the SRC directory and all its contents to the DST directory. Ensure this
+// works whether DST has a trailing path separator or not.
+func TestCopyCaseGFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := filepath.Join(tmpDirA, "dirSymlink")
+ linkTarget := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "dir2")
+ resultDir := filepath.Join(dstDir, "dirSymlink")
+
+ var err error
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, linkTarget); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir2")
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, resultDir, linkTarget); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// H. SRC specifies a directory's contents only and DST does not exist. This
+// should create a directory at DST and copy the contents of the SRC
+// directory (but not the directory itself) into the DST directory. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseH(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+}
+
+// H. Symlink-following version:
+// SRC specifies a directory's contents only and DST does not exist. This
+// should create a directory at DST and copy the contents of the SRC
+// directory (but not the directory itself) into the DST directory. Ensure
+// this works whether DST has a trailing path separator or not.
+func TestCopyCaseHFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+
+ srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "."
+ linkTarget := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "testDir")
+
+ var err error
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "testDir")
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Log("dir contents not equal")
+ logDirContents(t, tmpDirA)
+ logDirContents(t, tmpDirB)
+ t.Fatal(err)
+ }
+}
+
+// I. SRC specifies a directory's contents only and DST exists as a file. This
+// should cause an error as it is not possible to overwrite a file with a
+// directory.
+func TestCopyCaseI(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ symSrcDir := filepath.Join(tmpDirB, "dirSymlink")
+ dstFile := filepath.Join(tmpDirB, "file1")
+
+ var err error
+
+ if err = testCopyHelper(t, srcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+
+	// Now try with a symlink to the dir.
+ if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil {
+ t.Fatal("expected ErrCannotCopyDir error, but got nil instead")
+ }
+
+ if err != ErrCannotCopyDir {
+ t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err)
+ }
+}
+
+// J. SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJ(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := joinTrailingSep(tmpDirA, "dir1") + "."
+ dstDir := filepath.Join(tmpDirB, "dir5")
+
+ var err error
+
+	// First create an empty dir.
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+ if err = testCopyHelper(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, srcDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// J. Symlink-following version:
+// SRC specifies a directory's contents only and DST exists as a directory.
+// This should copy the contents of the SRC directory (but not the directory
+// itself) into the DST directory. Ensure this works whether DST has a
+// trailing path separator or not.
+func TestCopyCaseJFSym(t *testing.T) {
+ tmpDirA, tmpDirB := getTestTempDirs(t)
+ defer removeAllPaths(tmpDirA, tmpDirB)
+
+ // Load A and B with some sample files and directories.
+ createSampleDir(t, tmpDirA)
+ createSampleDir(t, tmpDirB)
+
+ srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "."
+ linkTarget := filepath.Join(tmpDirA, "dir1")
+ dstDir := filepath.Join(tmpDirB, "dir5")
+
+ var err error
+
+	// First create an empty dir.
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Fatal(err)
+ }
+
+ // Now try again but using a trailing path separator for dstDir.
+
+ if err = os.RemoveAll(dstDir); err != nil {
+ t.Fatalf("unable to remove dstDir: %s", err)
+ }
+
+ if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil {
+ t.Fatalf("unable to make dstDir: %s", err)
+ }
+
+ dstDir = joinTrailingSep(tmpDirB, "dir5")
+
+ if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil {
+ t.Fatalf("unexpected error %T: %s", err, err)
+ }
+
+ if err = dirContentsEqual(t, dstDir, linkTarget); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go
rename to vendor/github.com/docker/docker/pkg/archive/copy_windows.go
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
similarity index 63%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
rename to vendor/github.com/docker/docker/pkg/archive/diff.go
index 10a63a051bb..1b08ad33ab6 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go
+++ b/vendor/github.com/docker/docker/pkg/archive/diff.go
@@ -9,23 +9,41 @@ import (
"path/filepath"
"runtime"
"strings"
- "syscall"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/idtools"
+ "github.com/docker/docker/pkg/pools"
+ "github.com/docker/docker/pkg/system"
)
-func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
+// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
+ unpackedPaths := make(map[string]struct{})
+
+ if options == nil {
+ options = &TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+ remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
@@ -55,7 +73,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
- // image but have it tagged as Windows inadvertantly.
+ // image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
@@ -80,11 +98,11 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
}
// Skip AUFS metadata dirs
- if strings.HasPrefix(hdr.Name, ".wh..wh.") {
+ if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
- if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
+ if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
@@ -97,7 +115,10 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
return 0, err
}
}
- continue
+
+ if hdr.Name != WhiteoutOpaqueDir {
+ continue
+ }
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
@@ -111,11 +132,38 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
}
base := filepath.Base(path)
- if strings.HasPrefix(base, ".wh.") {
- originalBase := base[len(".wh."):]
- originalPath := filepath.Join(filepath.Dir(path), originalBase)
- if err := os.RemoveAll(originalPath); err != nil {
- return 0, err
+ if strings.HasPrefix(base, WhiteoutPrefix) {
+ dir := filepath.Dir(path)
+ if base == WhiteoutOpaqueDir {
+ _, err := os.Lstat(dir)
+ if err != nil {
+ return 0, err
+ }
+ err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = nil // parent was deleted
+ }
+ return err
+ }
+ if path == dir {
+ return nil
+ }
+ if _, exists := unpackedPaths[path]; !exists {
+ err := os.RemoveAll(path)
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ originalBase := base[len(WhiteoutPrefix):]
+ originalPath := filepath.Join(dir, originalBase)
+ if err := os.RemoveAll(originalPath); err != nil {
+ return 0, err
+ }
}
} else {
  // If path exists we almost always just want to remove and replace it.
@@ -136,7 +184,7 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
- if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
+ if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
@@ -150,6 +198,27 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
srcData = tmpFile
}
+	// if the options contain uid & gid maps, convert header uid/gid
+ // entries using the maps such that lchown sets the proper mapped
+ // uid/gid after writing the file. We only perform this mapping if
+ // the file isn't already owned by the remapped root UID or GID, as
+ // that specific uid/gid has no mapping from container -> host, and
+ // those files already have the proper ownership for inside the
+ // container.
+ if srcHdr.Uid != remappedRootUID {
+ xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Uid = xUID
+ }
+ if srcHdr.Gid != remappedRootGID {
+ xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
+ if err != nil {
+ return 0, err
+ }
+ srcHdr.Gid = xGID
+ }
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
return 0, err
}
@@ -159,13 +228,13 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
+ unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
- ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
- if err := syscall.UtimesNano(path, ts); err != nil {
+ if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
@@ -177,20 +246,20 @@ func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
- return applyLayerHandler(dest, layer, true)
+func ApplyLayer(dest string, layer Reader) (int64, error) {
+ return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) {
- return applyLayerHandler(dest, layer, false)
+func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
}
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
-func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) {
+func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
@@ -206,5 +275,5 @@ func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64
return 0, err
}
}
- return UnpackLayer(dest, layer)
+ return UnpackLayer(dest, layer, options)
}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff_test.go b/vendor/github.com/docker/docker/pkg/archive/diff_test.go
new file mode 100644
index 00000000000..8167941ac07
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/diff_test.go
@@ -0,0 +1,386 @@
+package archive
+
+import (
+ "archive/tar"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+func TestApplyLayerInvalidFilenames(t *testing.T) {
+ // TODO Windows: Figure out how to fix this test.
+ if runtime.GOOS == "windows" {
+ t.Skip("Passes but hits breakoutError: platform and architecture is not supported")
+ }
+ for i, headers := range [][]*tar.Header{
+ {
+ {
+ Name: "../victim/dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ {
+ {
+ // Note the leading slash
+ Name: "/../victim/slash-dotdot",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidHardlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("TypeLink support on Windows")
+ }
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeLink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (hardlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try reading victim/hello (hardlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // Try removing victim directory (hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeLink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerInvalidSymlink(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("TypeSymLink support on Windows")
+ }
+ for i, headers := range [][]*tar.Header{
+ { // try reading victim/hello (../)
+ {
+ Name: "dotdot",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (/../)
+ {
+ Name: "slash-dotdot",
+ Typeflag: tar.TypeSymlink,
+ // Note the leading slash
+ Linkname: "/../victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try writing victim/file
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim/file",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "symlink",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try reading victim/hello (symlink, hardlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "hardlink",
+ Typeflag: tar.TypeLink,
+ Linkname: "loophole-victim/hello",
+ Mode: 0644,
+ },
+ },
+ { // try removing victim directory (symlink)
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeSymlink,
+ Linkname: "../victim",
+ Mode: 0755,
+ },
+ {
+ Name: "loophole-victim",
+ Typeflag: tar.TypeReg,
+ Mode: 0644,
+ },
+ },
+ } {
+ if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil {
+ t.Fatalf("i=%d. %v", i, err)
+ }
+ }
+}
+
+func TestApplyLayerWhiteouts(t *testing.T) {
+ // TODO Windows: Figure out why this test fails
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+
+ wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts")
+ if err != nil {
+		t.Fatal(err)
+ }
+ defer os.RemoveAll(wd)
+
+ base := []string{
+ ".baz",
+ "bar/",
+ "bar/bax",
+ "bar/bay/",
+ "baz",
+ "foo/",
+ "foo/.abc",
+ "foo/.bcd/",
+ "foo/.bcd/a",
+ "foo/cde/",
+ "foo/cde/def",
+ "foo/cde/efg",
+ "foo/fgh",
+ "foobar",
+ }
+
+ type tcase struct {
+ change, expected []string
+ }
+
+ tcases := []tcase{
+ {
+ base,
+ base,
+ },
+ {
+ []string{
+ ".bay",
+ ".wh.baz",
+ "foo/",
+ "foo/.bce",
+ "foo/.wh..wh..opq",
+ "foo/cde/",
+ "foo/cde/efg",
+ },
+ []string{
+ ".bay",
+ ".baz",
+ "bar/",
+ "bar/bax",
+ "bar/bay/",
+ "foo/",
+ "foo/.bce",
+ "foo/cde/",
+ "foo/cde/efg",
+ "foobar",
+ },
+ },
+ {
+ []string{
+ ".bay",
+ ".wh..baz",
+ ".wh.foobar",
+ "foo/",
+ "foo/.abc",
+ "foo/.wh.cde",
+ "bar/",
+ },
+ []string{
+ ".bay",
+ "bar/",
+ "bar/bax",
+ "bar/bay/",
+ "foo/",
+ "foo/.abc",
+ "foo/.bce",
+ },
+ },
+ {
+ []string{
+ ".abc",
+ ".wh..wh..opq",
+ "foobar",
+ },
+ []string{
+ ".abc",
+ "foobar",
+ },
+ },
+ }
+
+ for i, tc := range tcases {
+ l, err := makeTestLayer(tc.change)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = UnpackLayer(wd, l, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = l.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ paths, err := readDirContents(wd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(tc.expected, paths) {
+ t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths)
+ }
+ }
+}
+
+func makeTestLayer(paths []string) (rc io.ReadCloser, err error) {
+ tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer")
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ os.RemoveAll(tmpDir)
+ }
+ }()
+ for _, p := range paths {
+ if p[len(p)-1] == filepath.Separator {
+ if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil {
+ return
+ }
+ } else {
+ if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil {
+ return
+ }
+ }
+ }
+ archive, err := Tar(tmpDir, Uncompressed)
+ if err != nil {
+ return
+ }
+ return ioutils.NewReadCloserWrapper(archive, func() error {
+ err := archive.Close()
+ os.RemoveAll(tmpDir)
+ return err
+ }), nil
+}
+
+func readDirContents(root string) ([]string, error) {
+ var files []string
+ err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == root {
+ return nil
+ }
+ rel, err := filepath.Rel(root, path)
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ rel = rel + "/"
+ }
+ files = append(files, rel)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return files, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
similarity index 93%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
rename to vendor/github.com/docker/docker/pkg/archive/example_changes.go
index a5e08e4ee96..cedd46a408e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go
+++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
@@ -13,8 +13,8 @@ import (
"os"
"path"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/archive"
)
var (
diff --git a/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar b/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar
new file mode 100644
index 00000000000..8f10ea6b87d
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/archive/testdata/broken.tar differ
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go
rename to vendor/github.com/docker/docker/pkg/archive/time_linux.go
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go
rename to vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
diff --git a/vendor/github.com/docker/docker/pkg/archive/utils_test.go b/vendor/github.com/docker/docker/pkg/archive/utils_test.go
new file mode 100644
index 00000000000..98719032f34
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/utils_test.go
@@ -0,0 +1,166 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+var testUntarFns = map[string]func(string, io.Reader) error{
+ "untar": func(dest string, r io.Reader) error {
+ return Untar(r, dest, nil)
+ },
+ "applylayer": func(dest string, r io.Reader) error {
+ _, err := ApplyLayer(dest, Reader(r))
+ return err
+ },
+}
+
+// testBreakout is a helper function that, within the provided `tmpdir` directory,
+// creates a `victim` folder with a generated `hello` file in it.
+// `untar` extracts the tar file created from `headers` into a directory named `dest`.
+//
+// Here are the tested scenarios:
+// - removed `victim` folder (write)
+// - removed files from `victim` folder (write)
+// - new files in `victim` folder (write)
+// - modified files in `victim` folder (write)
+// - file in `dest` with same content as `victim/hello` (read)
+//
+// When using testBreakout, make sure you cover one of the scenarios listed above.
+func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error {
+ tmpdir, err := ioutil.TempDir("", tmpdir)
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpdir)
+
+ dest := filepath.Join(tmpdir, "dest")
+ if err := os.Mkdir(dest, 0755); err != nil {
+ return err
+ }
+
+ victim := filepath.Join(tmpdir, "victim")
+ if err := os.Mkdir(victim, 0755); err != nil {
+ return err
+ }
+ hello := filepath.Join(victim, "hello")
+ helloData, err := time.Now().MarshalText()
+ if err != nil {
+ return err
+ }
+ if err := ioutil.WriteFile(hello, helloData, 0644); err != nil {
+ return err
+ }
+ helloStat, err := os.Stat(hello)
+ if err != nil {
+ return err
+ }
+
+ reader, writer := io.Pipe()
+ go func() {
+ t := tar.NewWriter(writer)
+ for _, hdr := range headers {
+ t.WriteHeader(hdr)
+ }
+ t.Close()
+ }()
+
+ untar := testUntarFns[untarFn]
+ if untar == nil {
+ return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn)
+ }
+ if err := untar(dest, reader); err != nil {
+ if _, ok := err.(breakoutError); !ok {
+ // If untar returns an error unrelated to an archive breakout,
+ // then consider this an unexpected error and abort.
+ return err
+ }
+ // Here, untar detected the breakout.
+ // Let's move on to verifying that there was indeed no breakout.
+ fmt.Printf("breakoutError: %v\n", err)
+ }
+
+ // Check victim folder
+ f, err := os.Open(victim)
+ if err != nil {
+ // codepath taken if victim folder was removed
+ return fmt.Errorf("archive breakout: error reading %q: %v", victim, err)
+ }
+ defer f.Close()
+
+ // Check contents of victim folder
+ //
+ // We are only interested in getting 2 files from the victim folder, because if all is well
+ // we expect only one result, the `hello` file. If there is a second result, it cannot
+ // have the same name `hello`, so we assume that a new file got created in the victim folder.
+ // That is enough to detect an archive breakout.
+ names, err := f.Readdirnames(2)
+ if err != nil {
+ // codepath taken if victim is not a folder
+ return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err)
+ }
+ for _, name := range names {
+ if name != "hello" {
+ // codepath taken if new file was created in victim folder
+ return fmt.Errorf("archive breakout: new file %q", name)
+ }
+ }
+
+ // Check victim/hello
+ f, err = os.Open(hello)
+ if err != nil {
+ // codepath taken if read permissions were removed
+ return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err)
+ }
+ defer f.Close()
+ b, err := ioutil.ReadAll(f)
+ if err != nil {
+ return err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return err
+ }
+ if helloStat.IsDir() != fi.IsDir() ||
+ // TODO: cannot check for fi.ModTime() change
+ helloStat.Mode() != fi.Mode() ||
+ helloStat.Size() != fi.Size() ||
+ !bytes.Equal(helloData, b) {
+ // codepath taken if hello has been modified
+ return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi)
+ }
+
+ // Check that nothing in dest/ has the same content as victim/hello.
+ // Since victim/hello was generated with time.Now(), it is safe to assume
+// that any file whose content exactly matches victim/hello somehow managed
+// to access victim/hello.
+ return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error {
+ if info.IsDir() {
+ if err != nil {
+ // skip directory if error
+ return filepath.SkipDir
+ }
+ // enter directory
+ return nil
+ }
+ if err != nil {
+ // skip file if error
+ return nil
+ }
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ // Houston, we have a problem. Aborting (space)walk.
+ return err
+ }
+ if bytes.Equal(helloData, b) {
+ return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path)
+ }
+ return nil
+ })
+}
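A minimal sketch of how testBreakout is typically driven, assuming only the helpers defined above; the traversal path "../victim/newfile" is illustrative, not taken from this patch:

    headers := []*tar.Header{
        {
            Name:     "../victim/newfile", // escapes `dest` into the sibling `victim` folder
            Typeflag: tar.TypeReg,
            Mode:     0644,
        },
    }
    if err := testBreakout("untar", "docker-TestUntarBreakout", headers); err != nil {
        t.Fatal(err)
    }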
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
new file mode 100644
index 00000000000..d20478a10dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
@@ -0,0 +1,23 @@
+package archive
+
+// Whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// WhiteoutPrefix prefix means the file is a whiteout. If it is followed by a
+// filename, that file has been removed from the base layer.
+const WhiteoutPrefix = ".wh."
+
+// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
+
+// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
+
+// WhiteoutOpaqueDir file means the directory has been made opaque, meaning
+// that readdir calls to this directory do not fall through to lower layers.
+const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
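A short illustrative helper, not part of the vendored file, showing how these constants are usually consumed when reading a layer: strip the prefix from the base name to recover the path being deleted (assumes "path/filepath" and "strings" are imported):

    func whiteoutTarget(name string) (string, bool) {
        base := filepath.Base(name)
        if !strings.HasPrefix(base, WhiteoutPrefix) {
            return "", false
        }
        // e.g. "foo/.wh.baz" marks "foo/baz" as removed in this layer
        deleted := strings.TrimPrefix(base, WhiteoutPrefix)
        return filepath.Join(filepath.Dir(name), deleted), true
    }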
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go
rename to vendor/github.com/docker/docker/pkg/archive/wrap.go
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap_test.go b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go
new file mode 100644
index 00000000000..46ab36697a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/archive/wrap_test.go
@@ -0,0 +1,98 @@
+package archive
+
+import (
+ "archive/tar"
+ "bytes"
+ "io"
+ "testing"
+)
+
+func TestGenerateEmptyFile(t *testing.T) {
+ archive, err := Generate("emptyFile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if archive == nil {
+ t.Fatal("The generated archive should not be nil.")
+ }
+
+ expectedFiles := [][]string{
+ {"emptyFile", ""},
+ }
+
+ tr := tar.NewReader(archive)
+ actualFiles := make([][]string, 0, 10)
+ i := 0
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(tr)
+ content := buf.String()
+ actualFiles = append(actualFiles, []string{hdr.Name, content})
+ i++
+ }
+ if len(actualFiles) != len(expectedFiles) {
+ t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+ }
+ for i := 0; i < len(expectedFiles); i++ {
+ actual := actualFiles[i]
+ expected := expectedFiles[i]
+ if actual[0] != expected[0] {
+ t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+ }
+ if actual[1] != expected[1] {
+ t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+ }
+ }
+}
+
+func TestGenerateWithContent(t *testing.T) {
+ archive, err := Generate("file", "content")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if archive == nil {
+ t.Fatal("The generated archive should not be nil.")
+ }
+
+ expectedFiles := [][]string{
+ {"file", "content"},
+ }
+
+ tr := tar.NewReader(archive)
+ actualFiles := make([][]string, 0, 10)
+ i := 0
+ for {
+ hdr, err := tr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(tr)
+ content := buf.String()
+ actualFiles = append(actualFiles, []string{hdr.Name, content})
+ i++
+ }
+ if len(actualFiles) != len(expectedFiles) {
+ t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles))
+ }
+ for i := 0; i < len(expectedFiles); i++ {
+ actual := actualFiles[i]
+ expected := expectedFiles[i]
+ if actual[0] != expected[0] {
+ t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0])
+ }
+ if actual[1] != expected[1] {
+ t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1])
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go
new file mode 100644
index 00000000000..fc82c46b01a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/api.go
@@ -0,0 +1,54 @@
+package authorization
+
+const (
+ // AuthZApiRequest is the url for daemon request authorization
+ AuthZApiRequest = "AuthZPlugin.AuthZReq"
+
+ // AuthZApiResponse is the url for daemon response authorization
+ AuthZApiResponse = "AuthZPlugin.AuthZRes"
+
+ // AuthZApiImplements is the name of the interface all AuthZ plugins implement
+ AuthZApiImplements = "authz"
+)
+
+// Request holds data required for authZ plugins
+type Request struct {
+ // User holds the user extracted by AuthN mechanism
+ User string `json:"User,omitempty"`
+
+ // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb)
+ UserAuthNMethod string `json:"UserAuthNMethod,omitempty"`
+
+ // RequestMethod holds the HTTP method (GET/POST/PUT)
+ RequestMethod string `json:"RequestMethod,omitempty"`
+
+ // RequestURI holds the full HTTP URI (e.g., /v1.21/version)
+ RequestURI string `json:"RequestUri,omitempty"`
+
+ // RequestBody stores the raw request body sent to the docker daemon
+ RequestBody []byte `json:"RequestBody,omitempty"`
+
+ // RequestHeaders stores the raw request headers sent to the docker daemon
+ RequestHeaders map[string]string `json:"RequestHeaders,omitempty"`
+
+ // ResponseStatusCode stores the status code returned from the docker daemon
+ ResponseStatusCode int `json:"ResponseStatusCode,omitempty"`
+
+ // ResponseBody stores the raw response body sent from the docker daemon
+ ResponseBody []byte `json:"ResponseBody,omitempty"`
+
+ // ResponseHeaders stores the response headers sent from the docker daemon
+ ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"`
+}
+
+// Response represents the authZ plugin response
+type Response struct {
+ // Allow indicates whether the user is allowed or not
+ Allow bool `json:"Allow"`
+
+ // Msg stores the authorization message
+ Msg string `json:"Msg,omitempty"`
+
+ // Err stores a message in case there's an error
+ Err string `json:"Err,omitempty"`
+}
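Given the struct tags above, the JSON a plugin returns for an allowed request would look roughly as follows; this is a hedged sketch of the wire format implied by the types, not an excerpt from the plugin protocol documentation:

    resp := Response{Allow: true, Msg: "request permitted"}
    b, err := json.Marshal(resp)
    // b == []byte(`{"Allow":true,"Msg":"request permitted"}`), err == nil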
diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go
new file mode 100644
index 00000000000..1f960289ad5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/authz.go
@@ -0,0 +1,179 @@
+package authorization
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/ioutils"
+)
+
+const maxBodySize = 1048576 // 1MB
+
+// NewCtx creates a new authZ context; it is used to store authorization information related to a specific docker
+// REST http session
+// A context provides two methods:
+// Authorize Request:
+// Call authZ plugins with the current REST request and AuthN response
+// Request contains the full HTTP packet sent to the docker daemon
+// https://docs.docker.com/reference/api/docker_remote_api/
+//
+// Authorize Response:
+// Call authZ plugins with full info about the current REST request, REST response and AuthN response
+// The response from this method may contain content that overrides the daemon response
+// This allows authZ plugins to filter privileged content
+//
+// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results
+// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order
+// is determined according to daemon parameters
+func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx {
+ return &Ctx{
+ plugins: authZPlugins,
+ user: user,
+ userAuthNMethod: userAuthNMethod,
+ requestMethod: requestMethod,
+ requestURI: requestURI,
+ }
+}
+
+// Ctx stores a single request-response interaction context
+type Ctx struct {
+ user string
+ userAuthNMethod string
+ requestMethod string
+ requestURI string
+ plugins []Plugin
+ // authReq stores the cached request object for the current transaction
+ authReq *Request
+}
+
+// AuthZRequest authorizes the request to the docker daemon using authZ plugins
+func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
+ var body []byte
+ if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize {
+ var err error
+ body, r.Body, err = drainBody(r.Body)
+ if err != nil {
+ return err
+ }
+ }
+
+ var h bytes.Buffer
+ if err := r.Header.Write(&h); err != nil {
+ return err
+ }
+
+ ctx.authReq = &Request{
+ User: ctx.user,
+ UserAuthNMethod: ctx.userAuthNMethod,
+ RequestMethod: ctx.requestMethod,
+ RequestURI: ctx.requestURI,
+ RequestBody: body,
+ RequestHeaders: headers(r.Header),
+ }
+
+ for _, plugin := range ctx.plugins {
+ logrus.Debugf("AuthZ request using plugin %s", plugin.Name())
+
+ authRes, err := plugin.AuthZRequest(ctx.authReq)
+ if err != nil {
+ return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
+ }
+
+ if !authRes.Allow {
+ return newAuthorizationError(plugin.Name(), authRes.Msg)
+ }
+ }
+
+ return nil
+}
+
+// AuthZResponse authorizes and manipulates the response from the docker daemon using authZ plugins
+func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error {
+ ctx.authReq.ResponseStatusCode = rm.StatusCode()
+ ctx.authReq.ResponseHeaders = headers(rm.Header())
+
+ if sendBody(ctx.requestURI, rm.Header()) {
+ ctx.authReq.ResponseBody = rm.RawBody()
+ }
+
+ for _, plugin := range ctx.plugins {
+ logrus.Debugf("AuthZ response using plugin %s", plugin.Name())
+
+ authRes, err := plugin.AuthZResponse(ctx.authReq)
+ if err != nil {
+ return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
+ }
+
+ if !authRes.Allow {
+ return newAuthorizationError(plugin.Name(), authRes.Msg)
+ }
+ }
+
+ rm.FlushAll()
+
+ return nil
+}
+
+// drainBody dumps the body (if its length is less than 1MB) without modifying the request state
+func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
+ bufReader := bufio.NewReaderSize(body, maxBodySize)
+ newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
+
+ data, err := bufReader.Peek(maxBodySize)
+ // Peek succeeded with a full buffer, so the body is at least maxBodySize; skip it
+ if err == nil {
+ logrus.Warnf("Request body is larger than %d bytes, skipping body", maxBodySize)
+ return nil, newBody, nil
+ }
+ // Body size is less than maximum size
+ if err == io.EOF {
+ return data, newBody, nil
+ }
+ // Unknown error
+ return nil, newBody, err
+}
+
+// sendBody returns true when request/response body should be sent to AuthZPlugin
+func sendBody(url string, header http.Header) bool {
+ // Skip body for auth endpoint
+ if strings.HasSuffix(url, "/auth") {
+ return false
+ }
+
+ // body is sent only for json messages
+ return header.Get("Content-Type") == "application/json"
+}
+
+// headers returns a flattened version of the http headers, excluding authorization headers
+func headers(header http.Header) map[string]string {
+ v := make(map[string]string)
+ for k, values := range header {
+ // Skip authorization headers
+ if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") {
+ continue
+ }
+ for _, val := range values {
+ v[k] = val
+ }
+ }
+ return v
+}
+
+// authorizationError represents an authorization deny error
+type authorizationError struct {
+ error
+}
+
+// HTTPErrorStatusCode returns the authorization error status code (forbidden)
+func (e authorizationError) HTTPErrorStatusCode() int {
+ return http.StatusForbidden
+}
+
+func newAuthorizationError(plugin, msg string) authorizationError {
+ return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)}
+}
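A compact sketch of the intended call sequence for Ctx, mirroring what the middleware elsewhere in this patch does; `plugins`, `user`, `handler`, `ctx`, `vars`, `w`, and `r` are assumed to be in scope:

    authCtx := NewCtx(plugins, user, "TLS", r.Method, r.RequestURI)
    if err := authCtx.AuthZRequest(w, r); err != nil {
        return err // denied by a plugin, or a plugin failure
    }
    rw := NewResponseModifier(w)
    if err := handler(ctx, rw, r, vars); err != nil {
        return err
    }
    return authCtx.AuthZResponse(rw, r) // may rewrite the buffered response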
diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go
new file mode 100644
index 00000000000..a787f3cd8c0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/authz_unix_test.go
@@ -0,0 +1,282 @@
+// +build !windows
+
+// TODO Windows: This uses a Unix socket for testing. This might be possible
+// to port to Windows using a named pipe instead.
+
+package authorization
+
+import (
+ "bytes"
+ "encoding/json"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/plugins"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/gorilla/mux"
+)
+
+const (
+ pluginAddress = "authz-test-plugin.sock"
+)
+
+func TestAuthZRequestPluginError(t *testing.T) {
+ server := authZPluginTestServer{t: t}
+ server.start()
+ defer server.stop()
+
+ authZPlugin := createTestPlugin(t)
+
+ request := Request{
+ User: "user",
+ RequestBody: []byte("sample body"),
+ RequestURI: "www.authz.com/auth",
+ RequestMethod: "GET",
+ RequestHeaders: map[string]string{"header": "value"},
+ }
+ server.replayResponse = Response{
+ Err: "an error",
+ }
+
+ actualResponse, err := authZPlugin.AuthZRequest(&request)
+ if err != nil {
+ t.Fatalf("Failed to authorize request %v", err)
+ }
+
+ if !reflect.DeepEqual(server.replayResponse, *actualResponse) {
+ t.Fatal("Response must be equal")
+ }
+ if !reflect.DeepEqual(request, server.recordedRequest) {
+ t.Fatal("Requests must be equal")
+ }
+}
+
+func TestAuthZRequestPlugin(t *testing.T) {
+ server := authZPluginTestServer{t: t}
+ server.start()
+ defer server.stop()
+
+ authZPlugin := createTestPlugin(t)
+
+ request := Request{
+ User: "user",
+ RequestBody: []byte("sample body"),
+ RequestURI: "www.authz.com/auth",
+ RequestMethod: "GET",
+ RequestHeaders: map[string]string{"header": "value"},
+ }
+ server.replayResponse = Response{
+ Allow: true,
+ Msg: "Sample message",
+ }
+
+ actualResponse, err := authZPlugin.AuthZRequest(&request)
+ if err != nil {
+ t.Fatalf("Failed to authorize request %v", err)
+ }
+
+ if !reflect.DeepEqual(server.replayResponse, *actualResponse) {
+ t.Fatal("Response must be equal")
+ }
+ if !reflect.DeepEqual(request, server.recordedRequest) {
+ t.Fatal("Requests must be equal")
+ }
+}
+
+func TestAuthZResponsePlugin(t *testing.T) {
+ server := authZPluginTestServer{t: t}
+ server.start()
+ defer server.stop()
+
+ authZPlugin := createTestPlugin(t)
+
+ request := Request{
+ User: "user",
+ RequestURI: "something.com/auth",
+ RequestBody: []byte("sample body"),
+ }
+ server.replayResponse = Response{
+ Allow: true,
+ Msg: "Sample message",
+ }
+
+ actualResponse, err := authZPlugin.AuthZResponse(&request)
+ if err != nil {
+ t.Fatalf("Failed to authorize request %v", err)
+ }
+
+ if !reflect.DeepEqual(server.replayResponse, *actualResponse) {
+ t.Fatal("Response must be equal")
+ }
+ if !reflect.DeepEqual(request, server.recordedRequest) {
+ t.Fatal("Requests must be equal")
+ }
+}
+
+func TestResponseModifier(t *testing.T) {
+ r := httptest.NewRecorder()
+ m := NewResponseModifier(r)
+ m.Header().Set("h1", "v1")
+ m.Write([]byte("body"))
+ m.WriteHeader(http.StatusInternalServerError)
+
+ m.FlushAll()
+ if r.Header().Get("h1") != "v1" {
+ t.Fatalf("Header value must exists %s", r.Header().Get("h1"))
+ }
+ if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) {
+ t.Fatalf("Body value must exists %s", r.Body.Bytes())
+ }
+ if r.Code != http.StatusInternalServerError {
+ t.Fatalf("Status code must be correct %d", r.Code)
+ }
+}
+
+func TestDrainBody(t *testing.T) {
+ tests := []struct {
+ length int // length is the message length sent to drainBody
+ expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called
+ }{
+ {10, 10}, // Small message size
+ {maxBodySize - 1, maxBodySize - 1}, // Just under the max message size
+ {maxBodySize * 2, 0}, // Large message size (skip copying body)
+ }
+
+ for _, test := range tests {
+ msg := strings.Repeat("a", test.length)
+ body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg))))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(body) != test.expectedBodyLength {
+ t.Fatalf("Body must be copied, actual length: '%d'", len(body))
+ }
+ if closer == nil {
+ t.Fatal("Closer must not be nil")
+ }
+ modified, err := ioutil.ReadAll(closer)
+ if err != nil {
+ t.Fatalf("Error must not be nil: '%v'", err)
+ }
+ if len(modified) != len(msg) {
+ t.Fatalf("Result should not be truncated. Original length: '%d', new length: '%d'", len(msg), len(modified))
+ }
+ }
+}
+
+func TestResponseModifierOverride(t *testing.T) {
+ r := httptest.NewRecorder()
+ m := NewResponseModifier(r)
+ m.Header().Set("h1", "v1")
+ m.Write([]byte("body"))
+ m.WriteHeader(http.StatusInternalServerError)
+
+ overrideHeader := make(http.Header)
+ overrideHeader.Add("h1", "v2")
+ overrideHeaderBytes, err := json.Marshal(overrideHeader)
+ if err != nil {
+ t.Fatalf("override header failed %v", err)
+ }
+
+ m.OverrideHeader(overrideHeaderBytes)
+ m.OverrideBody([]byte("override body"))
+ m.OverrideStatusCode(http.StatusNotFound)
+ m.FlushAll()
+ if r.Header().Get("h1") != "v2" {
+ t.Fatalf("Header value must exists %s", r.Header().Get("h1"))
+ }
+ if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) {
+ t.Fatalf("Body value must exists %s", r.Body.Bytes())
+ }
+ if r.Code != http.StatusNotFound {
+ t.Fatalf("Status code must be correct %d", r.Code)
+ }
+}
+
+// createTestPlugin creates a new sample authorization plugin
+func createTestPlugin(t *testing.T) *authorizationPlugin {
+ pwd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true})
+ if err != nil {
+ t.Fatalf("Failed to create client %v", err)
+ }
+
+ return &authorizationPlugin{name: "plugin", plugin: client}
+}
+
+// authZPluginTestServer is a simple server that implements the authZ plugin interface
+type authZPluginTestServer struct {
+ listener net.Listener
+ t *testing.T
+ // request stores the request sent from the daemon to the plugin
+ recordedRequest Request
+ // response stores the response sent from the plugin to the daemon
+ replayResponse Response
+ server *httptest.Server
+}
+
+// start starts the test server that implements the plugin
+func (t *authZPluginTestServer) start() {
+ r := mux.NewRouter()
+ l, err := net.Listen("unix", pluginAddress)
+ if err != nil {
+ t.t.Fatal(err)
+ }
+ t.listener = l
+ r.HandleFunc("/Plugin.Activate", t.activate)
+ r.HandleFunc("/"+AuthZApiRequest, t.auth)
+ r.HandleFunc("/"+AuthZApiResponse, t.auth)
+ t.server = &httptest.Server{
+ Listener: l,
+ Config: &http.Server{
+ Handler: r,
+ Addr: pluginAddress,
+ },
+ }
+ t.server.Start()
+}
+
+// stop stops the test server that implements the plugin
+func (t *authZPluginTestServer) stop() {
+ t.server.Close()
+ os.Remove(pluginAddress)
+ if t.listener != nil {
+ t.listener.Close()
+ }
+}
+
+// auth is used to record/replay the authorization api messages
+func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) {
+ t.recordedRequest = Request{}
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.t.Fatal(err)
+ }
+ r.Body.Close()
+ json.Unmarshal(body, &t.recordedRequest)
+ b, err := json.Marshal(t.replayResponse)
+ if err != nil {
+ t.t.Fatal(err)
+ }
+ w.Write(b)
+}
+
+func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) {
+ b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}})
+ if err != nil {
+ t.t.Fatal(err)
+ }
+ w.Write(b)
+}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/vendor/github.com/docker/docker/pkg/authorization/middleware.go
new file mode 100644
index 00000000000..73511a81489
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/middleware.go
@@ -0,0 +1,60 @@
+package authorization
+
+import (
+ "net/http"
+
+ "github.com/Sirupsen/logrus"
+ "golang.org/x/net/context"
+)
+
+// Middleware uses a list of plugins to
+// handle authorization in the API requests.
+type Middleware struct {
+ plugins []Plugin
+}
+
+// NewMiddleware creates a new Middleware
+// with a slice of plugins.
+func NewMiddleware(p []Plugin) Middleware {
+ return Middleware{
+ plugins: p,
+ }
+}
+
+// WrapHandler returns a new handler function wrapping the previous one in the request chain.
+func (m Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+ return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+
+ user := ""
+ userAuthNMethod := ""
+
+ // Default authorization using existing TLS connection credentials
+ // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support
+ // and ldap) will be extracted using AuthN feature, which is tracked under:
+ // https://github.com/docker/docker/pull/20883
+ if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
+ user = r.TLS.PeerCertificates[0].Subject.CommonName
+ userAuthNMethod = "TLS"
+ }
+
+ authCtx := NewCtx(m.plugins, user, userAuthNMethod, r.Method, r.RequestURI)
+
+ if err := authCtx.AuthZRequest(w, r); err != nil {
+ logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err)
+ return err
+ }
+
+ rw := NewResponseModifier(w)
+
+ if err := handler(ctx, rw, r, vars); err != nil {
+ logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err)
+ return err
+ }
+
+ if err := authCtx.AuthZResponse(rw, r); err != nil {
+ logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err)
+ return err
+ }
+ return nil
+ }
+}
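A hedged usage sketch: `getInfo` is a hypothetical handler with the signature WrapHandler expects (func(context.Context, http.ResponseWriter, *http.Request, map[string]string) error):

    m := NewMiddleware(NewPlugins([]string{"authz-broker"}))
    wrapped := m.WrapHandler(getInfo)
    // `wrapped` has the same signature as `getInfo` and can be registered
    // wherever the daemon expects such handlers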
diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go
new file mode 100644
index 00000000000..fc5c7efb4b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/plugin.go
@@ -0,0 +1,92 @@
+package authorization
+
+import (
+ "sync"
+
+ "github.com/docker/docker/pkg/plugins"
+)
+
+// Plugin allows third party plugins to authorize requests and responses
+// in the context of docker API
+type Plugin interface {
+ // Name returns the registered plugin name
+ Name() string
+
+ // AuthZRequest authorizes the request from the client to the daemon
+ AuthZRequest(*Request) (*Response, error)
+
+ // AuthZResponse authorizes the response from the daemon to the client
+ AuthZResponse(*Request) (*Response, error)
+}
+
+// NewPlugins constructs and initializes the authorization plugins based on plugin names
+func NewPlugins(names []string) []Plugin {
+ plugins := []Plugin{}
+ pluginsMap := make(map[string]struct{})
+ for _, name := range names {
+ if _, ok := pluginsMap[name]; ok {
+ continue
+ }
+ pluginsMap[name] = struct{}{}
+ plugins = append(plugins, newAuthorizationPlugin(name))
+ }
+ return plugins
+}
+
+// authorizationPlugin is an internal adapter to the docker plugin system
+type authorizationPlugin struct {
+ plugin *plugins.Client
+ name string
+ once sync.Once
+}
+
+func newAuthorizationPlugin(name string) Plugin {
+ return &authorizationPlugin{name: name}
+}
+
+func (a *authorizationPlugin) Name() string {
+ return a.name
+}
+
+func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) {
+ if err := a.initPlugin(); err != nil {
+ return nil, err
+ }
+
+ authRes := &Response{}
+ if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil {
+ return nil, err
+ }
+
+ return authRes, nil
+}
+
+func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) {
+ if err := a.initPlugin(); err != nil {
+ return nil, err
+ }
+
+ authRes := &Response{}
+ if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil {
+ return nil, err
+ }
+
+ return authRes, nil
+}
+
+// initPlugin initializes the authorization plugin if needed
+func (a *authorizationPlugin) initPlugin() error {
+ // Lazy loading of plugins
+ var err error
+ a.once.Do(func() {
+ if a.plugin == nil {
+ plugin, e := plugins.Get(a.name, AuthZApiImplements)
+ if e != nil {
+ err = e
+ return
+ }
+ a.plugin = plugin.Client()
+ }
+ })
+ return err
+}
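Two behaviors of the code above worth noting: NewPlugins de-duplicates by name, and each plugin dials its socket lazily, only on the first AuthZRequest/AuthZResponse call. A small sketch with hypothetical plugin names:

    ps := NewPlugins([]string{"authz-broker", "authz-broker", "audit"})
    // len(ps) == 2; plugins.Get is only invoked inside initPlugin, on first use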
diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go
new file mode 100644
index 00000000000..f29a5fa91eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/authorization/response.go
@@ -0,0 +1,201 @@
+package authorization
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/http"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// ResponseModifier allows authorization plugins to read and modify the content of the HTTP response
+type ResponseModifier interface {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+
+ // RawBody returns the current http content
+ RawBody() []byte
+
+ // RawHeaders returns the current content of the http headers
+ RawHeaders() ([]byte, error)
+
+ // StatusCode returns the current status code
+ StatusCode() int
+
+ // OverrideBody replaces the body of the HTTP reply
+ OverrideBody(b []byte)
+
+ // OverrideHeader replaces the headers of the HTTP reply
+ OverrideHeader(b []byte) error
+
+ // OverrideStatusCode replaces the status code of the HTTP reply
+ OverrideStatusCode(statusCode int)
+
+ // FlushAll flushes all data to the HTTP response
+ FlushAll() error
+
+ // Hijacked indicates the response has been hijacked by the Docker daemon
+ Hijacked() bool
+}
+
+// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content
+func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
+ return &responseModifier{rw: rw, header: make(http.Header)}
+}
+
+// responseModifier is used as an adapter to http.ResponseWriter in order to inspect and manipulate
+// the http response from the docker daemon
+type responseModifier struct {
+ // The original response writer
+ rw http.ResponseWriter
+ // body holds the response body
+ body []byte
+ // header holds the response header
+ header http.Header
+ // statusCode holds the response status code
+ statusCode int
+ // hijacked indicates the request has been hijacked
+ hijacked bool
+}
+
+func (rm *responseModifier) Hijacked() bool {
+ return rm.hijacked
+}
+
+// WriteHeader stores the http status code
+func (rm *responseModifier) WriteHeader(s int) {
+
+ // Use original request if hijacked
+ if rm.hijacked {
+ rm.rw.WriteHeader(s)
+ return
+ }
+
+ rm.statusCode = s
+}
+
+// Header returns the internal http header
+func (rm *responseModifier) Header() http.Header {
+
+ // Use original header if hijacked
+ if rm.hijacked {
+ return rm.rw.Header()
+ }
+
+ return rm.header
+}
+
+// StatusCode returns the http status code
+func (rm *responseModifier) StatusCode() int {
+ return rm.statusCode
+}
+
+// OverrideBody replaces the body of the HTTP response
+func (rm *responseModifier) OverrideBody(b []byte) {
+ rm.body = b
+}
+
+// OverrideStatusCode replaces the status code of the HTTP response
+func (rm *responseModifier) OverrideStatusCode(statusCode int) {
+ rm.statusCode = statusCode
+}
+
+// OverrideHeader replaces the headers of the HTTP response
+func (rm *responseModifier) OverrideHeader(b []byte) error {
+ header := http.Header{}
+ if err := json.Unmarshal(b, &header); err != nil {
+ return err
+ }
+ rm.header = header
+ return nil
+}
+
+// Write appends the bytes to the buffered response body
+func (rm *responseModifier) Write(b []byte) (int, error) {
+
+ if rm.hijacked {
+ return rm.rw.Write(b)
+ }
+
+ rm.body = append(rm.body, b...)
+ return len(b), nil
+}
+
+// RawBody returns the response body
+func (rm *responseModifier) RawBody() []byte {
+ return rm.body
+}
+
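+// RawHeaders returns the current content of the http headers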
+func (rm *responseModifier) RawHeaders() ([]byte, error) {
+ var b bytes.Buffer
+ if err := rm.header.Write(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// Hijack returns the internal connection of the wrapped http.ResponseWriter
+func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+
+ rm.hijacked = true
+ rm.FlushAll()
+
+ hijacker, ok := rm.rw.(http.Hijacker)
+ if !ok {
+ return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface")
+ }
+ return hijacker.Hijack()
+}
+
+// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter
+func (rm *responseModifier) CloseNotify() <-chan bool {
+ closeNotifier, ok := rm.rw.(http.CloseNotifier)
+ if !ok {
+ logrus.Error("Internal response writer doesn't support the CloseNotifier interface")
+ return nil
+ }
+ return closeNotifier.CloseNotify()
+}
+
+// Flush uses the internal flush API of the wrapped http.ResponseWriter
+func (rm *responseModifier) Flush() {
+ flusher, ok := rm.rw.(http.Flusher)
+ if !ok {
+ logrus.Error("Internal response writer doesn't support the Flusher interface")
+ return
+ }
+
+ rm.FlushAll()
+ flusher.Flush()
+}
+
+// FlushAll flushes all data to the HTTP response
+func (rm *responseModifier) FlushAll() error {
+ // Copy the header; headers must be set before the status code is
+ // written, otherwise net/http silently drops them
+ for k, vv := range rm.header {
+ for _, v := range vv {
+ rm.rw.Header().Add(k, v)
+ }
+ }
+
+ // Copy the status code
+ if rm.statusCode > 0 {
+ rm.rw.WriteHeader(rm.statusCode)
+ }
+
+ var err error
+ if len(rm.body) > 0 {
+ // Write body
+ _, err = rm.rw.Write(rm.body)
+ }
+
+ // Clean previous data
+ rm.body = nil
+ rm.statusCode = 0
+ rm.header = http.Header{}
+ return err
+}
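The tests elsewhere in this patch exercise exactly this flow; as a quick sketch, with `w` standing for any http.ResponseWriter, writes are buffered until FlushAll pushes everything to the wrapped writer in one pass:

    rm := NewResponseModifier(w)
    rm.Write([]byte("body"))                    // buffered, nothing sent yet
    rm.OverrideStatusCode(http.StatusForbidden) // e.g. a plugin's override
    err := rm.FlushAll()                        // headers, status code, then body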
diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
new file mode 100644
index 00000000000..784d65d6feb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
@@ -0,0 +1,49 @@
+package broadcaster
+
+import (
+ "io"
+ "sync"
+)
+
+// Unbuffered broadcasts each write to multiple io.WriteClosers, without buffering.
+type Unbuffered struct {
+ mu sync.Mutex
+ writers []io.WriteCloser
+}
+
+// Add adds new io.WriteCloser.
+func (w *Unbuffered) Add(writer io.WriteCloser) {
+ w.mu.Lock()
+ w.writers = append(w.writers, writer)
+ w.mu.Unlock()
+}
+
+// Write writes bytes to all writers. Failed writers will be evicted during
+// this call.
+func (w *Unbuffered) Write(p []byte) (n int, err error) {
+ w.mu.Lock()
+ var evict []int
+ for i, sw := range w.writers {
+ if n, err := sw.Write(p); err != nil || n != len(p) {
+ // On error, evict the writer
+ evict = append(evict, i)
+ }
+ }
+ for n, i := range evict {
+ w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...)
+ }
+ w.mu.Unlock()
+ return len(p), nil
+}
+
+// Clean closes and removes all writers.
+func (w *Unbuffered) Clean() error {
+ w.mu.Lock()
+ for _, sw := range w.writers {
+ sw.Close()
+ }
+ w.writers = nil
+ w.mu.Unlock()
+ return nil
+}
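A minimal usage sketch: every Write is fanned out to all registered writers, a failing writer is silently evicted, and Write always reports success to the caller; `w1` and `w2` stand for any io.WriteClosers:

    var b Unbuffered
    b.Add(w1)
    b.Add(w2)
    b.Write([]byte("hello")) // reaches both w1 and w2
    b.Clean()                // closes and drops both writers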
diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go
new file mode 100644
index 00000000000..9f8e72bc0fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered_test.go
@@ -0,0 +1,162 @@
+package broadcaster
+
+import (
+ "bytes"
+ "errors"
+ "strings"
+
+ "testing"
+)
+
+type dummyWriter struct {
+ buffer bytes.Buffer
+ failOnWrite bool
+}
+
+func (dw *dummyWriter) Write(p []byte) (n int, err error) {
+ if dw.failOnWrite {
+ return 0, errors.New("Fake fail")
+ }
+ return dw.buffer.Write(p)
+}
+
+func (dw *dummyWriter) String() string {
+ return dw.buffer.String()
+}
+
+func (dw *dummyWriter) Close() error {
+ return nil
+}
+
+func TestUnbuffered(t *testing.T) {
+ writer := new(Unbuffered)
+
+ // Test 1: Both bufferA and bufferB should contain "foo"
+ bufferA := &dummyWriter{}
+ writer.Add(bufferA)
+ bufferB := &dummyWriter{}
+ writer.Add(bufferB)
+ writer.Write([]byte("foo"))
+
+ if bufferA.String() != "foo" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+
+ if bufferB.String() != "foo" {
+ t.Errorf("Buffer contains %v", bufferB.String())
+ }
+
+ // Test 2: bufferA and bufferB should contain "foobar",
+ // while bufferC should only contain "bar"
+ bufferC := &dummyWriter{}
+ writer.Add(bufferC)
+ writer.Write([]byte("bar"))
+
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+
+ if bufferB.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferB.String())
+ }
+
+ if bufferC.String() != "bar" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+
+ // Test 3: Test eviction on failure
+ bufferA.failOnWrite = true
+ writer.Write([]byte("fail"))
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+ if bufferC.String() != "barfail" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+ // Even though we reset the flag, no more writes should go in there
+ bufferA.failOnWrite = false
+ writer.Write([]byte("test"))
+ if bufferA.String() != "foobar" {
+ t.Errorf("Buffer contains %v", bufferA.String())
+ }
+ if bufferC.String() != "barfailtest" {
+ t.Errorf("Buffer contains %v", bufferC.String())
+ }
+
+ // Test 4: Test eviction on multiple simultaneous failures
+ bufferB.failOnWrite = true
+ bufferC.failOnWrite = true
+ bufferD := &dummyWriter{}
+ writer.Add(bufferD)
+ writer.Write([]byte("yo"))
+ writer.Write([]byte("ink"))
+ if strings.Contains(bufferB.String(), "yoink") {
+ t.Errorf("bufferB received write. contents: %q", bufferB)
+ }
+ if strings.Contains(bufferC.String(), "yoink") {
+ t.Errorf("bufferC received write. contents: %q", bufferC)
+ }
+ if g, w := bufferD.String(), "yoink"; g != w {
+ t.Errorf("bufferD = %q, want %q", g, w)
+ }
+
+ writer.Clean()
+}
+
+type devNullCloser int
+
+func (d devNullCloser) Close() error {
+ return nil
+}
+
+func (d devNullCloser) Write(buf []byte) (int, error) {
+ return len(buf), nil
+}
+
+// This test checks for races. It is only useful when run with the race detector.
+func TestRaceUnbuffered(t *testing.T) {
+ writer := new(Unbuffered)
+ c := make(chan bool)
+ go func() {
+ writer.Add(devNullCloser(0))
+ c <- true
+ }()
+ writer.Write([]byte("hello"))
+ <-c
+}
+
+func BenchmarkUnbuffered(b *testing.B) {
+ writer := new(Unbuffered)
+ setUpWriter := func() {
+ for i := 0; i < 100; i++ {
+ writer.Add(devNullCloser(0))
+ writer.Add(devNullCloser(0))
+ writer.Add(devNullCloser(0))
+ }
+ }
+ testLine := "Line that thinks that it is log line from docker"
+ var buf bytes.Buffer
+ for i := 0; i < 100; i++ {
+ buf.Write([]byte(testLine + "\n"))
+ }
+ // line without eol
+ buf.Write([]byte(testLine))
+ testText := buf.Bytes()
+ b.SetBytes(int64(5 * len(testText)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ setUpWriter()
+ b.StartTimer()
+
+ for j := 0; j < 5; j++ {
+ if _, err := writer.Write(testText); err != nil {
+ b.Fatal(err)
+ }
+ }
+
+ b.StopTimer()
+ writer.Clean()
+ b.StartTimer()
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
new file mode 100644
index 00000000000..a7814f5b906
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
@@ -0,0 +1,97 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/idtools"
+)
+
+var chrootArchiver = &archive.Archiver{Untar: Untar}
+
+// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive may be compressed with one of the following algorithms:
+// identity (uncompressed), gzip, bzip2, xz.
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, true)
+}
+
+// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
+// and unpacks it into the directory at `dest`.
+// The archive must be an uncompressed stream.
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
+ return untarHandler(tarArchive, dest, options, false)
+}
+
+// Handler for teasing out the automatic decompression
+func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
+
+ if tarArchive == nil {
+ return fmt.Errorf("Empty archive")
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
+ if err != nil {
+ return err
+ }
+
+ dest = filepath.Clean(dest)
+ if _, err := os.Stat(dest); os.IsNotExist(err) {
+ if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil {
+ return err
+ }
+ }
+
+ r := ioutil.NopCloser(tarArchive)
+ if decompress {
+ decompressedArchive, err := archive.DecompressStream(tarArchive)
+ if err != nil {
+ return err
+ }
+ defer decompressedArchive.Close()
+ r = decompressedArchive
+ }
+
+ return invokeUnpack(r, dest, options)
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func TarUntar(src, dst string) error {
+ return chrootArchiver.TarUntar(src, dst)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func CopyWithTar(src, dst string) error {
+ return chrootArchiver.CopyWithTar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+//
+// If `dst` ends with a trailing slash '/' ('\' on Windows), the final
+// destination path will be `dst/base(src)` or `dst\base(src)`
+func CopyFileWithTar(src, dst string) (err error) {
+ return chrootArchiver.CopyFileWithTar(src, dst)
+}
+
+// UntarPath is a convenience function which looks for an archive
+// at filesystem path `src`, and unpacks it at `dst`.
+func UntarPath(src, dst string) error {
+ return chrootArchiver.UntarPath(src, dst)
+}
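A hedged sketch of the package's main entry point; the paths are hypothetical:

    f, err := os.Open("/tmp/layer.tar")
    if err != nil {
        return err
    }
    defer f.Close()
    // decompresses if needed, then unpacks under /tmp/dest,
    // chrooting into it first on platforms that support chroot
    return Untar(f, "/tmp/dest", &archive.TarOptions{})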
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go
new file mode 100644
index 00000000000..5fbe20843fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_test.go
@@ -0,0 +1,394 @@
+package chrootarchive
+
+import (
+ "bytes"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/docker/pkg/system"
+)
+
+func init() {
+ reexec.Init()
+}
+
+func TestChrootTarUntar(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ stream, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+ dest := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of
+// local images)
+func TestChrootUntarWithHugeExcludesList(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ stream, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+ dest := filepath.Join(tmpdir, "dest")
+ if err := system.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ options := &archive.TarOptions{}
+ // 65534 entries of 64-byte strings ~= 4MB of environment space, which should overflow
+ // on most systems when passed via environment or command line arguments
+ excludes := make([]string, 65534)
+ for i := 0; i < 65534; i++ {
+ excludes[i] = strings.Repeat(string(i), 64)
+ }
+ options.ExcludePatterns = excludes
+ if err := Untar(stream, dest, options); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootUntarEmptyArchive(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := Untar(nil, tmpdir, nil); err == nil {
+ t.Fatal("expected error on empty archive")
+ }
+}
+
+func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) {
+ fileData := []byte("fooo")
+ for n := 0; n < numberOfFiles; n++ {
+ fileName := fmt.Sprintf("file-%d", n)
+ if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil {
+ return 0, err
+ }
+ if makeSymLinks {
+ if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil {
+ return 0, err
+ }
+ }
+ }
+ totalSize := numberOfFiles * len(fileData)
+ return totalSize, nil
+}
+
+func getHash(filename string) (uint32, error) {
+ stream, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return 0, err
+ }
+ hash := crc32.NewIEEE()
+ hash.Write(stream)
+ return hash.Sum32(), nil
+}
+
+func compareDirectories(src string, dest string) error {
+ changes, err := archive.ChangesDirs(dest, src)
+ if err != nil {
+ return err
+ }
+ if len(changes) > 0 {
+ return fmt.Errorf("Unexpected differences after untar: %v", changes)
+ }
+ return nil
+}
+
+func compareFiles(src string, dest string) error {
+ srcHash, err := getHash(src)
+ if err != nil {
+ return err
+ }
+ destHash, err := getHash(dest)
+ if err != nil {
+ return err
+ }
+ if srcHash != destHash {
+ return fmt.Errorf("%s is different from %s", src, dest)
+ }
+ return nil
+}
+
+func TestChrootTarUntarWithSymlink(t *testing.T) {
+ // TODO Windows: Figure out why this is failing
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := prepareSourceDirectory(10, src, true); err != nil {
+ t.Fatal(err)
+ }
+ dest := filepath.Join(tmpdir, "dest")
+ if err := TarUntar(src, dest); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareDirectories(src, dest); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootCopyWithTar(t *testing.T) {
+ // TODO Windows: Figure out why this is failing
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := prepareSourceDirectory(10, src, true); err != nil {
+ t.Fatal(err)
+ }
+
+ // Copy directory
+ dest := filepath.Join(tmpdir, "dest")
+ if err := CopyWithTar(src, dest); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareDirectories(src, dest); err != nil {
+ t.Fatal(err)
+ }
+
+ // Copy file
+ srcfile := filepath.Join(src, "file-1")
+ dest = filepath.Join(tmpdir, "destFile")
+ destfile := filepath.Join(dest, "file-1")
+ if err := CopyWithTar(srcfile, destfile); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareFiles(srcfile, destfile); err != nil {
+ t.Fatal(err)
+ }
+
+ // Copy symbolic link
+ srcLinkfile := filepath.Join(src, "file-1-link")
+ dest = filepath.Join(tmpdir, "destSymlink")
+ destLinkfile := filepath.Join(dest, "file-1-link")
+ if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareFiles(srcLinkfile, destLinkfile); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootCopyFileWithTar(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := prepareSourceDirectory(10, src, true); err != nil {
+ t.Fatal(err)
+ }
+
+ // Copy directory
+ dest := filepath.Join(tmpdir, "dest")
+ if err := CopyFileWithTar(src, dest); err == nil {
+ t.Fatal("Expected error on copying directory")
+ }
+
+ // Copy file
+ srcfile := filepath.Join(src, "file-1")
+ dest = filepath.Join(tmpdir, "destFile")
+ destfile := filepath.Join(dest, "file-1")
+ if err := CopyFileWithTar(srcfile, destfile); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareFiles(srcfile, destfile); err != nil {
+ t.Fatal(err)
+ }
+
+ // Copy symbolic link
+ srcLinkfile := filepath.Join(src, "file-1-link")
+ dest = filepath.Join(tmpdir, "destSymlink")
+ destLinkfile := filepath.Join(dest, "file-1-link")
+ if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareFiles(srcLinkfile, destLinkfile); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootUntarPath(t *testing.T) {
+ // TODO Windows: Figure out why this is failing
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows")
+ }
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := prepareSourceDirectory(10, src, true); err != nil {
+ t.Fatal(err)
+ }
+ dest := filepath.Join(tmpdir, "dest")
+ // Untar a directory
+ if err := UntarPath(src, dest); err == nil {
+ t.Fatal("Expected error on untaring a directory")
+ }
+
+ // Untar a tar file
+ stream, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf := new(bytes.Buffer)
+ buf.ReadFrom(stream)
+ tarfile := filepath.Join(tmpdir, "src.tar")
+ if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if err := UntarPath(tarfile, dest); err != nil {
+ t.Fatal(err)
+ }
+ if err := compareDirectories(src, dest); err != nil {
+ t.Fatal(err)
+ }
+}
+
+type slowEmptyTarReader struct {
+ size int
+ offset int
+ chunkSize int
+}
+
+// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null")
+func (s *slowEmptyTarReader) Read(p []byte) (int, error) {
+ time.Sleep(100 * time.Millisecond)
+ count := s.chunkSize
+ if len(p) < s.chunkSize {
+ count = len(p)
+ }
+ for i := 0; i < count; i++ {
+ p[i] = 0
+ }
+ s.offset += count
+ if s.offset > s.size {
+ return count, io.EOF
+ }
+ return count, nil
+}
+
+func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ dest := filepath.Join(tmpdir, "dest")
+ if err := system.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+ if err := Untar(stream, dest, nil); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ dest := filepath.Join(tmpdir, "dest")
+ if err := system.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024}
+ if _, err := ApplyLayer(dest, stream); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestChrootApplyDotDotFile(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ src := filepath.Join(tmpdir, "src")
+ if err := system.MkdirAll(src, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil {
+ t.Fatal(err)
+ }
+ stream, err := archive.Tar(src, archive.Uncompressed)
+ if err != nil {
+ t.Fatal(err)
+ }
+ dest := filepath.Join(tmpdir, "dest")
+ if err := system.MkdirAll(dest, 0700); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ApplyLayer(dest, stream); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
new file mode 100644
index 00000000000..f2325abd74e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
@@ -0,0 +1,86 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "runtime"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/reexec"
+)
+
+// untar is the entry-point for docker-untar on re-exec. This is not used on
+// Windows as it does not support chroot, hence no point sandboxing through
+// chroot and reexec.
+func untar() {
+ runtime.LockOSThread()
+ flag.Parse()
+
+ var options *archive.TarOptions
+
+ // read the options from the pipe "ExtraFiles"
+ if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
+ fatal(err)
+ }
+
+ if err := chroot(flag.Arg(0)); err != nil {
+ fatal(err)
+ }
+
+ if err := archive.Unpack(os.Stdin, "/", options); err != nil {
+ fatal(err)
+ }
+ // fully consume stdin in case it is zero padded
+ if _, err := flush(os.Stdin); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
+
+	// We can't pass a potentially large exclude list directly via the
+	// command line because we easily overrun the kernel's max
+	// argument/environment size when the full image list is passed
+	// (e.g. when this is used by `docker load`). We will marshal the
+	// options via a pipe to the child.
+ r, w, err := os.Pipe()
+ if err != nil {
+ return fmt.Errorf("Untar pipe failure: %v", err)
+ }
+
+ cmd := reexec.Command("docker-untar", dest)
+ cmd.Stdin = decompressedArchive
+
+ cmd.ExtraFiles = append(cmd.ExtraFiles, r)
+ output := bytes.NewBuffer(nil)
+ cmd.Stdout = output
+ cmd.Stderr = output
+
+ if err := cmd.Start(); err != nil {
+ return fmt.Errorf("Untar error on re-exec cmd: %v", err)
+ }
+	// write the options to the pipe for the untar exec to read
+ if err := json.NewEncoder(w).Encode(options); err != nil {
+ return fmt.Errorf("Untar json encode to pipe failed: %v", err)
+ }
+ w.Close()
+
+ if err := cmd.Wait(); err != nil {
+		// When `xz -d -c -q | docker-untar ...` fails on the docker-untar
+		// side, we need to exhaust `xz`'s output, otherwise the `xz` side
+		// will block on the write pipe forever.
+ io.Copy(ioutil.Discard, decompressedArchive)
+
+ return fmt.Errorf("Error processing tar file(%v): %s", err, output)
+ }
+ return nil
+}
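
The options-over-pipe handshake above is worth seeing in isolation. A minimal, self-contained sketch of the same parent/child pattern using only the standard library; the `/proc/self/exe` re-invocation (Linux-only) and all names here are illustrative assumptions, not part of this package:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type options struct {
	ExcludePatterns []string `json:"exclude_patterns"`
}

// parent re-invokes the current binary and hands it the options over an
// extra pipe, sidestepping argv/env size limits for large exclude lists.
func parent() error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	cmd := exec.Command("/proc/self/exe", "child")
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Start(); err != nil {
		return err
	}
	if err := json.NewEncoder(w).Encode(&options{ExcludePatterns: []string{"*.tmp"}}); err != nil {
		return err
	}
	w.Close() // signal EOF so the child's decoder finishes
	return cmd.Wait()
}

// child mirrors the untar() entry point: it decodes the options from fd 3.
func child() error {
	var opts options
	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&opts); err != nil {
		return err
	}
	fmt.Println("excludes:", opts.ExcludePatterns)
	return nil
}

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		if err := child(); err != nil {
			os.Exit(1)
		}
		return
	}
	if err := parent(); err != nil {
		os.Exit(1)
	}
}
```

The real code uses reexec.Command for the re-invocation, but the fd-3 plumbing is identical.
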
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
new file mode 100644
index 00000000000..0a500ed5c2d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
@@ -0,0 +1,22 @@
+package chrootarchive
+
+import (
+ "io"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// chroot is not supported by Windows
+func chroot(path string) error {
+ return nil
+}
+
+func invokeUnpack(decompressedArchive io.ReadCloser,
+ dest string,
+ options *archive.TarOptions) error {
+	// Windows is different from Linux here because Windows does not support
+	// chroot. Hence there is no point sandboxing a chrooted process to
+	// do the unpack. We call inline instead within the daemon process.
+ return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
new file mode 100644
index 00000000000..cefbef9df49
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
@@ -0,0 +1,103 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+
+ "github.com/docker/docker/pkg/mount"
+)
+
+// chroot on linux uses pivot_root instead of chroot.
+// pivot_root takes a new root and an old root.
+// The old root must be a subdirectory of the new root; it is where the
+// current rootfs will reside after the call to pivot_root.
+// The new root is where the new rootfs is set to.
+// The old root is removed after the call to pivot_root so it is no longer
+// available under the new root.
+// This is similar to how libcontainer sets up a container's rootfs.
+func chroot(path string) (err error) {
+ if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
+ return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
+ }
+
+ if err := mount.MakeRPrivate(path); err != nil {
+ return err
+ }
+
+ // setup oldRoot for pivot_root
+ pivotDir, err := ioutil.TempDir(path, ".pivot_root")
+ if err != nil {
+ return fmt.Errorf("Error setting up pivot dir: %v", err)
+ }
+
+ var mounted bool
+ defer func() {
+ if mounted {
+ // make sure pivotDir is not mounted before we try to remove it
+ if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
+ if err == nil {
+ err = errCleanup
+ }
+ return
+ }
+ }
+
+ errCleanup := os.Remove(pivotDir)
+		// pivotDir no longer exists if pivot_root failed and we fell back to
+		// chroot+chdir, because the failure path already removed it
+ if errCleanup != nil && !os.IsNotExist(errCleanup) {
+ errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
+ if err == nil {
+ err = errCleanup
+ }
+ }
+
+ if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil {
+ if err == nil {
+ err = fmt.Errorf("error unmounting root: %v", errCleanup)
+ }
+ return
+ }
+ }()
+
+ if err := syscall.PivotRoot(path, pivotDir); err != nil {
+ // If pivot fails, fall back to the normal chroot after cleaning up temp dir
+ if err := os.Remove(pivotDir); err != nil {
+ return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
+ }
+ return realChroot(path)
+ }
+ mounted = true
+
+ // This is the new path for where the old root (prior to the pivot) has been moved to
+ // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
+ pivotDir = filepath.Join("/", filepath.Base(pivotDir))
+
+ if err := syscall.Chdir("/"); err != nil {
+ return fmt.Errorf("Error changing to new root: %v", err)
+ }
+
+ // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
+ if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
+ return fmt.Errorf("Error making old root private after pivot: %v", err)
+ }
+
+ // Now unmount the old root so it's no longer visible from the new root
+ if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
+ return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
+ }
+ mounted = false
+
+ return nil
+}
+
+func realChroot(path string) error {
+ if err := syscall.Chroot(path); err != nil {
+ return fmt.Errorf("Error after fallback to chroot: %v", err)
+ }
+ if err := syscall.Chdir("/"); err != nil {
+ return fmt.Errorf("Error changing to new root after chroot: %v", err)
+ }
+ return nil
+}
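
As a reading aid, here is the same unshare → make-private → pivot_root → detach-old-root sequence as a standalone sketch, assuming Linux, root privileges, and an existing target directory. The bind mount makes the target a mount point, which pivot_root requires; everything here is illustrative, not part of the package:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"syscall"
)

// pivotInto enters newroot using the same sequence as chroot() above.
func pivotInto(newroot string) error {
	runtime.LockOSThread() // namespace changes apply to this OS thread

	if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
		return err
	}
	// Make all mounts private so pivot_root is permitted (systemd hosts
	// default to shared propagation); mirrors mount.MakeRPrivate.
	if err := syscall.Mount("", "/", "", syscall.MS_REC|syscall.MS_PRIVATE, ""); err != nil {
		return err
	}
	// pivot_root requires the new root to be a mount point.
	if err := syscall.Mount(newroot, newroot, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
		return err
	}
	oldroot, err := ioutil.TempDir(newroot, ".pivot_root")
	if err != nil {
		return err
	}
	if err := syscall.PivotRoot(newroot, oldroot); err != nil {
		return err
	}
	if err := syscall.Chdir("/"); err != nil {
		return err
	}
	// Detach and delete the old root so the host rootfs is unreachable.
	oldroot = filepath.Join("/", filepath.Base(oldroot))
	if err := syscall.Unmount(oldroot, syscall.MNT_DETACH); err != nil {
		return err
	}
	return os.Remove(oldroot)
}

func main() {
	if err := pivotInto("/tmp/jail"); err != nil { // path illustrative
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("now inside the jail")
}
```
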
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
new file mode 100644
index 00000000000..16354bf6487
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux
+
+package chrootarchive
+
+import "syscall"
+
+func chroot(path string) error {
+ if err := syscall.Chroot(path); err != nil {
+ return err
+ }
+ return syscall.Chdir("/")
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
new file mode 100644
index 00000000000..94131a6eb8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
@@ -0,0 +1,19 @@
+package chrootarchive
+
+import "github.com/docker/docker/pkg/archive"
+
+// ApplyLayer parses a diff in the standard layer format from `layer`,
+// and applies it to the directory `dest`. The stream `layer` can be
+// compressed or uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) {
+ return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
+}
+
+// ApplyUncompressedLayer parses a diff in the standard layer format from
+// `layer`, and applies it to the directory `dest`. The stream `layer`
+// can only be uncompressed.
+// Returns the size in bytes of the contents of the layer.
+func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) {
+ return applyLayerHandler(dest, layer, options, false)
+}
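
A hedged usage sketch for these entry points; the layer path and destination are illustrative, and the tarball may be compressed since ApplyLayer decompresses internally:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar.gz") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// Unpacks inside a chroot/pivot_root of dest on Linux, so entries like
	// "../../etc/passwd" cannot escape the destination.
	size, err := chrootarchive.ApplyLayer("/var/lib/example/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied layer: %d bytes\n", size)
}
```
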
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
new file mode 100644
index 00000000000..a4adb74d58f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
@@ -0,0 +1,120 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "bytes"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/reexec"
+ "github.com/docker/docker/pkg/system"
+)
+
+type applyLayerResponse struct {
+ LayerSize int64 `json:"layerSize"`
+}
+
+// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
+// used on Windows as it does not support chroot, hence there is no point
+// sandboxing through chroot and re-exec.
+func applyLayer() {
+
+ var (
+ tmpDir = ""
+ err error
+ options *archive.TarOptions
+ )
+ runtime.LockOSThread()
+ flag.Parse()
+
+ if err := chroot(flag.Arg(0)); err != nil {
+ fatal(err)
+ }
+
+ // We need to be able to set any perms
+ oldmask, err := system.Umask(0)
+ defer system.Umask(oldmask)
+ if err != nil {
+ fatal(err)
+ }
+
+ if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
+ fatal(err)
+ }
+
+ if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
+ fatal(err)
+ }
+
+ os.Setenv("TMPDIR", tmpDir)
+ size, err := archive.UnpackLayer("/", os.Stdin, options)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+ fatal(err)
+ }
+
+ encoder := json.NewEncoder(os.Stdout)
+ if err := encoder.Encode(applyLayerResponse{size}); err != nil {
+ fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
+ }
+
+ if _, err := flush(os.Stdin); err != nil {
+ fatal(err)
+ }
+
+ os.Exit(0)
+}
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+ if options == nil {
+ options = &archive.TarOptions{}
+ }
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
+ }
+
+ data, err := json.Marshal(options)
+ if err != nil {
+ return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
+ }
+
+ cmd := reexec.Command("docker-applyLayer", dest)
+ cmd.Stdin = layer
+ cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
+
+ outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
+ cmd.Stdout, cmd.Stderr = outBuf, errBuf
+
+ if err = cmd.Run(); err != nil {
+ return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
+ }
+
+ // Stdout should be a valid JSON struct representing an applyLayerResponse.
+ response := applyLayerResponse{}
+ decoder := json.NewDecoder(outBuf)
+ if err = decoder.Decode(&response); err != nil {
+ return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
+ }
+
+ return response.LayerSize, nil
+}
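
The parent/child contract in this file is a small JSON protocol: options travel to the child in the OPT environment variable, and the child answers with a one-line applyLayerResponse on stdout. A self-contained sketch of that round trip, with buffers standing in for the real process boundary:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}

func main() {
	// Child side: report the unpacked size as JSON on (simulated) stdout.
	var stdout bytes.Buffer
	if err := json.NewEncoder(&stdout).Encode(applyLayerResponse{LayerSize: 4096}); err != nil {
		panic(err)
	}

	// Parent side: decode the child's stdout back into a response.
	var resp applyLayerResponse
	if err := json.NewDecoder(&stdout).Decode(&resp); err != nil {
		panic(err)
	}
	fmt.Println("layer size:", resp.LayerSize) // layer size: 4096
}
```
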
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
new file mode 100644
index 00000000000..8e1830cb830
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
@@ -0,0 +1,44 @@
+package chrootarchive
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/archive"
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
+ dest = filepath.Clean(dest)
+
+ // Ensure it is a Windows-style volume path
+ dest = longpath.AddPrefix(dest)
+
+ if decompress {
+ decompressed, err := archive.DecompressStream(layer)
+ if err != nil {
+ return 0, err
+ }
+ defer decompressed.Close()
+
+ layer = decompressed
+ }
+
+ tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
+ if err != nil {
+		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s: %s", os.Getenv("temp"), err)
+ }
+
+ s, err := archive.UnpackLayer(dest, layer, nil)
+ os.RemoveAll(tmpDir)
+ if err != nil {
+		return 0, fmt.Errorf("ApplyLayer failed to unpack layer to %s: %s", dest, err)
+ }
+
+ return s, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
new file mode 100644
index 00000000000..4f637f17b8f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
@@ -0,0 +1,28 @@
+// +build !windows
+
+package chrootarchive
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/docker/docker/pkg/reexec"
+)
+
+func init() {
+ reexec.Register("docker-applyLayer", applyLayer)
+ reexec.Register("docker-untar", untar)
+}
+
+func fatal(err error) {
+ fmt.Fprint(os.Stderr, err)
+ os.Exit(1)
+}
+
+// flush consumes all the bytes from the reader, discarding the data,
+// and reports how many bytes were read along with any error
+func flush(r io.Reader) (bytes int64, err error) {
+ return io.Copy(ioutil.Discard, r)
+}
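
These Register calls only take effect if the embedding binary lets reexec intercept startup: when the process is re-invoked with argv[0] set to "docker-untar" or "docker-applyLayer", reexec.Init() runs the registered function and returns true. A sketch of the conventional main(), assuming the standard docker/pkg/reexec behavior:

```go
package main

import (
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// In a re-exec'd child, Init runs the entry point registered under
	// argv[0] (e.g. "docker-untar") and returns true; the normal startup
	// path below is then skipped entirely.
	if reexec.Init() {
		return
	}

	// ... normal daemon/CLI startup continues here ...
	os.Exit(run())
}

func run() int { return 0 } // placeholder for the real program
```
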
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
new file mode 100644
index 00000000000..fa17c9bf831
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
@@ -0,0 +1,4 @@
+package chrootarchive
+
+func init() {
+}
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go
new file mode 100644
index 00000000000..838b06af40f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go
@@ -0,0 +1,807 @@
+// +build linux
+
+package devicemapper
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// DevmapperLogger defines methods for logging with devicemapper.
+type DevmapperLogger interface {
+ DMLog(level int, file string, line int, dmError int, message string)
+}
+
+const (
+ deviceCreate TaskType = iota
+ deviceReload
+ deviceRemove
+ deviceRemoveAll
+ deviceSuspend
+ deviceResume
+ deviceInfo
+ deviceDeps
+ deviceRename
+ deviceVersion
+ deviceStatus
+ deviceTable
+ deviceWaitevent
+ deviceList
+ deviceClear
+ deviceMknodes
+ deviceListVersions
+ deviceTargetMsg
+ deviceSetGeometry
+)
+
+const (
+ addNodeOnResume AddNodeType = iota
+ addNodeOnCreate
+)
+
+// List of errors returned when using devicemapper.
+var (
+ ErrTaskRun = errors.New("dm_task_run failed")
+ ErrTaskSetName = errors.New("dm_task_set_name failed")
+ ErrTaskSetMessage = errors.New("dm_task_set_message failed")
+ ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed")
+ ErrTaskSetRo = errors.New("dm_task_set_ro failed")
+ ErrTaskAddTarget = errors.New("dm_task_add_target failed")
+ ErrTaskSetSector = errors.New("dm_task_set_sector failed")
+ ErrTaskGetDeps = errors.New("dm_task_get_deps failed")
+ ErrTaskGetInfo = errors.New("dm_task_get_info failed")
+ ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed")
+ ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed")
+ ErrTaskSetCookie = errors.New("dm_task_set_cookie failed")
+ ErrNilCookie = errors.New("cookie ptr can't be nil")
+ ErrGetBlockSize = errors.New("Can't get block size")
+ ErrUdevWait = errors.New("wait on udev cookie failed")
+ ErrSetDevDir = errors.New("dm_set_dev_dir failed")
+ ErrGetLibraryVersion = errors.New("dm_get_library_version failed")
+ ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove")
+ ErrRunRemoveDevice = errors.New("running RemoveDevice failed")
+ ErrInvalidAddNode = errors.New("Invalid AddNode type")
+ ErrBusy = errors.New("Device is Busy")
+ ErrDeviceIDExists = errors.New("Device Id Exists")
+ ErrEnxio = errors.New("No such device or address")
+)
+
+var (
+ dmSawBusy bool
+ dmSawExist bool
+ dmSawEnxio bool // No Such Device or Address
+)
+
+type (
+	// Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl
+ // command to execute.
+ Task struct {
+ unmanaged *cdmTask
+ }
+ // Deps represents dependents (layer) of a device.
+ Deps struct {
+ Count uint32
+ Filler uint32
+ Device []uint64
+ }
+ // Info represents information about a device.
+ Info struct {
+ Exists int
+ Suspended int
+ LiveTable int
+ InactiveTable int
+ OpenCount int32
+ EventNr uint32
+ Major uint32
+ Minor uint32
+ ReadOnly int
+ TargetCount int32
+ DeferredRemove int
+ }
+ // TaskType represents a type of task
+ TaskType int
+ // AddNodeType represents a type of node to be added
+ AddNodeType int
+)
+
+// DeviceIDExists returns whether the error indicates that a device ID
+// already exists. This will be true if device creation or snapshot creation
+// fails because the device or snapshot already exists in the pool.
+// The current implementation is a little crude, as it scans the error string
+// for an exact pattern match; replacing it with a more robust implementation
+// is desirable.
+func DeviceIDExists(err error) bool {
+ return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists)
+}
+
+func (t *Task) destroy() {
+ if t != nil {
+ DmTaskDestroy(t.unmanaged)
+ runtime.SetFinalizer(t, nil)
+ }
+}
+
+// TaskCreateNamed is a convenience function for TaskCreate when a name
+// will be set on the task as well
+func TaskCreateNamed(t TaskType, name string) (*Task, error) {
+ task := TaskCreate(t)
+ if task == nil {
+ return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t))
+ }
+ if err := task.setName(name); err != nil {
+ return nil, fmt.Errorf("devicemapper: Can't set task name %s", name)
+ }
+ return task, nil
+}
+
+// TaskCreate initializes a devicemapper task of tasktype
+func TaskCreate(tasktype TaskType) *Task {
+ Ctask := DmTaskCreate(int(tasktype))
+ if Ctask == nil {
+ return nil
+ }
+ task := &Task{unmanaged: Ctask}
+ runtime.SetFinalizer(task, (*Task).destroy)
+ return task
+}
+
+func (t *Task) run() error {
+ if res := DmTaskRun(t.unmanaged); res != 1 {
+ return ErrTaskRun
+ }
+ return nil
+}
+
+func (t *Task) setName(name string) error {
+ if res := DmTaskSetName(t.unmanaged, name); res != 1 {
+ return ErrTaskSetName
+ }
+ return nil
+}
+
+func (t *Task) setMessage(message string) error {
+ if res := DmTaskSetMessage(t.unmanaged, message); res != 1 {
+ return ErrTaskSetMessage
+ }
+ return nil
+}
+
+func (t *Task) setSector(sector uint64) error {
+ if res := DmTaskSetSector(t.unmanaged, sector); res != 1 {
+ return ErrTaskSetSector
+ }
+ return nil
+}
+
+func (t *Task) setCookie(cookie *uint, flags uint16) error {
+ if cookie == nil {
+ return ErrNilCookie
+ }
+ if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 {
+ return ErrTaskSetCookie
+ }
+ return nil
+}
+
+func (t *Task) setAddNode(addNode AddNodeType) error {
+ if addNode != addNodeOnResume && addNode != addNodeOnCreate {
+ return ErrInvalidAddNode
+ }
+ if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 {
+ return ErrTaskSetAddNode
+ }
+ return nil
+}
+
+func (t *Task) setRo() error {
+ if res := DmTaskSetRo(t.unmanaged); res != 1 {
+ return ErrTaskSetRo
+ }
+ return nil
+}
+
+func (t *Task) addTarget(start, size uint64, ttype, params string) error {
+ if res := DmTaskAddTarget(t.unmanaged, start, size,
+ ttype, params); res != 1 {
+ return ErrTaskAddTarget
+ }
+ return nil
+}
+
+func (t *Task) getDeps() (*Deps, error) {
+ var deps *Deps
+ if deps = DmTaskGetDeps(t.unmanaged); deps == nil {
+ return nil, ErrTaskGetDeps
+ }
+ return deps, nil
+}
+
+func (t *Task) getInfo() (*Info, error) {
+ info := &Info{}
+ if res := DmTaskGetInfo(t.unmanaged, info); res != 1 {
+ return nil, ErrTaskGetInfo
+ }
+ return info, nil
+}
+
+func (t *Task) getInfoWithDeferred() (*Info, error) {
+ info := &Info{}
+ if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 {
+ return nil, ErrTaskGetInfo
+ }
+ return info, nil
+}
+
+func (t *Task) getDriverVersion() (string, error) {
+ res := DmTaskGetDriverVersion(t.unmanaged)
+ if res == "" {
+ return "", ErrTaskGetDriverVersion
+ }
+ return res, nil
+}
+
+func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64,
+ length uint64, targetType string, params string) {
+
+ return DmGetNextTarget(t.unmanaged, next, &start, &length,
+ &targetType, ¶ms),
+ start, length, targetType, params
+}
+
+// UdevWait waits for any processes that are waiting for udev to complete the specified cookie.
+func UdevWait(cookie *uint) error {
+ if res := DmUdevWait(*cookie); res != 1 {
+ logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie)
+ return ErrUdevWait
+ }
+ return nil
+}
+
+// LogInitVerbose initializes the verbose logger for the devicemapper library.
+func LogInitVerbose(level int) {
+ DmLogInitVerbose(level)
+}
+
+var dmLogger DevmapperLogger
+
+// LogInit initializes the logger for the device mapper library.
+func LogInit(logger DevmapperLogger) {
+ dmLogger = logger
+ LogWithErrnoInit()
+}
+
+// SetDevDir sets the dev folder for the device mapper library (usually /dev).
+func SetDevDir(dir string) error {
+ if res := DmSetDevDir(dir); res != 1 {
+ logrus.Debug("devicemapper: Error dm_set_dev_dir")
+ return ErrSetDevDir
+ }
+ return nil
+}
+
+// GetLibraryVersion returns the device mapper library version.
+func GetLibraryVersion() (string, error) {
+ var version string
+ if res := DmGetLibraryVersion(&version); res != 1 {
+ return "", ErrGetLibraryVersion
+ }
+ return version, nil
+}
+
+// UdevSyncSupported returns whether device-mapper is able to sync with udev
+//
+// This is essential otherwise race conditions can arise where both udev and
+// device-mapper attempt to create and destroy devices.
+func UdevSyncSupported() bool {
+ return DmUdevGetSyncSupport() != 0
+}
+
+// UdevSetSyncSupport allows setting whether the udev sync should be enabled.
+// The return bool indicates the state of whether the sync is enabled.
+func UdevSetSyncSupport(enable bool) bool {
+ if enable {
+ DmUdevSetSyncSupport(1)
+ } else {
+ DmUdevSetSyncSupport(0)
+ }
+
+ return UdevSyncSupported()
+}
+
+// CookieSupported returns whether the version of device-mapper supports the
+// use of cookies in tasks.
+// This is largely a lower level call that other functions use.
+func CookieSupported() bool {
+ return DmCookieSupported() != 0
+}
+
+// RemoveDevice is a useful helper for cleaning up a device.
+func RemoveDevice(name string) error {
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+ var cookie uint
+ if err := task.setCookie(&cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
+ }
+ defer UdevWait(&cookie)
+
+ dmSawBusy = false // reset before the task is run
+ if err = task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err)
+ }
+
+ return nil
+}
+
+// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred.
+func RemoveDeviceDeferred(name string) error {
+ logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name)
+ defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name)
+ task, err := TaskCreateNamed(deviceRemove, name)
+ if task == nil {
+ return err
+ }
+
+	if res := DmTaskDeferredRemove(task.unmanaged); res != 1 {
+ return ErrTaskDeferredRemove
+ }
+
+ if err = task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err)
+ }
+
+ return nil
+}
+
+// CancelDeferredRemove cancels a deferred remove for a device.
+func CancelDeferredRemove(deviceName string) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, deviceName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+	if err := task.setMessage("@cancel_deferred_remove"); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ dmSawEnxio = false
+ if err := task.run(); err != nil {
+ // A device might be being deleted already
+ if dmSawBusy {
+ return ErrBusy
+ } else if dmSawEnxio {
+ return ErrEnxio
+ }
+ return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err)
+
+ }
+ return nil
+}
+
+// GetBlockDeviceSize returns the size of a block device identified by the specified file.
+func GetBlockDeviceSize(file *os.File) (uint64, error) {
+ size, err := ioctlBlkGetSize64(file.Fd())
+ if err != nil {
+ logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err)
+ return 0, ErrGetBlockSize
+ }
+ return uint64(size), nil
+}
+
+// BlockDeviceDiscard runs discard for the given path.
+// This is used as a workaround for the kernel not discarding blocks on
+// the thin pool when we remove a thinp device, so we do it
+// manually.
+func BlockDeviceDiscard(path string) error {
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ size, err := GetBlockDeviceSize(file)
+ if err != nil {
+ return err
+ }
+
+ if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil {
+ return err
+ }
+
+	// Without this, the removal of the device that happens after the
+	// discard sometimes fails with EBUSY.
+ syscall.Sync()
+
+ return nil
+}
+
+// CreatePool is the programmatic example of "dmsetup create".
+// It creates a device with the specified poolName, data and metadata file and block size.
+func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceCreate, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ var cookie uint
+ var flags uint16
+ flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+ if err := task.setCookie(&cookie, flags); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(&cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err)
+ }
+
+ return nil
+}
+
+// ReloadPool is the programmatic example of "dmsetup reload".
+// It reloads the table with the specified poolName, data and metadata file and block size.
+func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error {
+ task, err := TaskCreateNamed(deviceReload, poolName)
+ if task == nil {
+ return err
+ }
+
+ size, err := GetBlockDeviceSize(dataFile)
+ if err != nil {
+ return fmt.Errorf("devicemapper: Can't get data size %s", err)
+ }
+
+ params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize)
+ if err := task.addTarget(0, size/512, "thin-pool", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate %s", err)
+ }
+
+ return nil
+}
+
+// GetDeps is the programmatic example of "dmsetup deps".
+// It outputs a list of devices referenced by the live table for the specified device.
+func GetDeps(name string) (*Deps, error) {
+ task, err := TaskCreateNamed(deviceDeps, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getDeps()
+}
+
+// GetInfo is the programmatic example of "dmsetup info".
+// It outputs some brief information about the device.
+func GetInfo(name string) (*Info, error) {
+ task, err := TaskCreateNamed(deviceInfo, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getInfo()
+}
+
+// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred.
+// It outputs some brief information about the device.
+func GetInfoWithDeferred(name string) (*Info, error) {
+ task, err := TaskCreateNamed(deviceInfo, name)
+ if task == nil {
+ return nil, err
+ }
+ if err := task.run(); err != nil {
+ return nil, err
+ }
+ return task.getInfoWithDeferred()
+}
+
+// GetDriverVersion is the programmatic example of "dmsetup version".
+// It outputs version information of the driver.
+func GetDriverVersion() (string, error) {
+ task := TaskCreate(deviceVersion)
+ if task == nil {
+ return "", fmt.Errorf("devicemapper: Can't create deviceVersion task")
+ }
+ if err := task.run(); err != nil {
+ return "", err
+ }
+ return task.getDriverVersion()
+}
+
+// GetStatus is the programmatic example of "dmsetup status".
+// It outputs status information for the specified device name.
+func GetStatus(name string) (uint64, uint64, string, string, error) {
+ task, err := TaskCreateNamed(deviceStatus, name)
+ if task == nil {
+ logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err)
+ return 0, 0, "", "", err
+ }
+ if err := task.run(); err != nil {
+ logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err)
+ return 0, 0, "", "", err
+ }
+
+ devinfo, err := task.getInfo()
+ if err != nil {
+ logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err)
+ return 0, 0, "", "", err
+ }
+ if devinfo.Exists == 0 {
+ logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name)
+ return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
+ }
+
+ _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
+ return start, length, targetType, params, nil
+}
+
+// GetTable is the programmatic example for "dmsetup table".
+// It outputs the current table for the specified device name.
+func GetTable(name string) (uint64, uint64, string, string, error) {
+ task, err := TaskCreateNamed(deviceTable, name)
+ if task == nil {
+ logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err)
+ return 0, 0, "", "", err
+ }
+ if err := task.run(); err != nil {
+ logrus.Debugf("devicemapper: GetTable() Error Run: %s", err)
+ return 0, 0, "", "", err
+ }
+
+ devinfo, err := task.getInfo()
+ if err != nil {
+ logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err)
+ return 0, 0, "", "", err
+ }
+ if devinfo.Exists == 0 {
+ logrus.Debugf("devicemapper: GetTable() Non existing device %s", name)
+ return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name)
+ }
+
+ _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil))
+ return start, length, targetType, params, nil
+}
+
+// SetTransactionID sets a transaction id for the specified device name.
+func SetTransactionID(poolName string, oldID uint64, newID uint64) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err)
+ }
+ return nil
+}
+
+// SuspendDevice is the programmatic example of "dmsetup suspend".
+// It suspends the specified device.
+func SuspendDevice(name string) error {
+ task, err := TaskCreateNamed(deviceSuspend, name)
+ if task == nil {
+ return err
+ }
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err)
+ }
+ return nil
+}
+
+// ResumeDevice is the programmatic example of "dmsetup resume".
+// It un-suspends the specified device.
+func ResumeDevice(name string) error {
+ task, err := TaskCreateNamed(deviceResume, name)
+ if task == nil {
+ return err
+ }
+
+ var cookie uint
+ if err := task.setCookie(&cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+ defer UdevWait(&cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceResume %s", err)
+ }
+
+ return nil
+}
+
+// CreateDevice creates a device with the specified poolName with the specified device id.
+func CreateDevice(poolName string, deviceID int) error {
+ logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID)
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawExist = false // reset before the task is run
+ if err := task.run(); err != nil {
+ // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
+ if dmSawExist {
+ return ErrDeviceIDExists
+ }
+
+ return fmt.Errorf("devicemapper: Error running CreateDevice %s", err)
+
+ }
+ return nil
+}
+
+// DeleteDevice deletes a device with the specified poolName with the specified device id.
+func DeleteDevice(poolName string, deviceID int) error {
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil {
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawBusy = false
+ if err := task.run(); err != nil {
+ if dmSawBusy {
+ return ErrBusy
+ }
+ return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err)
+ }
+ return nil
+}
+
+// ActivateDevice activates the device identified by the specified
+// poolName, name and deviceID with the specified size.
+func ActivateDevice(poolName string, name string, deviceID int, size uint64) error {
+ return activateDevice(poolName, name, deviceID, size, "")
+}
+
+// ActivateDeviceWithExternal activates the device identified by the specified
+// poolName, name and deviceID with the specified size.
+func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error {
+ return activateDevice(poolName, name, deviceID, size, external)
+}
+
+func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error {
+ task, err := TaskCreateNamed(deviceCreate, name)
+ if task == nil {
+ return err
+ }
+
+ var params string
+ if len(external) > 0 {
+ params = fmt.Sprintf("%s %d %s", poolName, deviceID, external)
+ } else {
+ params = fmt.Sprintf("%s %d", poolName, deviceID)
+ }
+ if err := task.addTarget(0, size/512, "thin", params); err != nil {
+ return fmt.Errorf("devicemapper: Can't add target %s", err)
+ }
+ if err := task.setAddNode(addNodeOnCreate); err != nil {
+ return fmt.Errorf("devicemapper: Can't add node %s", err)
+ }
+
+ var cookie uint
+ if err := task.setCookie(&cookie, 0); err != nil {
+ return fmt.Errorf("devicemapper: Can't set cookie %s", err)
+ }
+
+ defer UdevWait(&cookie)
+
+ if err := task.run(); err != nil {
+ return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err)
+ }
+
+ return nil
+}
+
+// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID.
+func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error {
+ devinfo, _ := GetInfo(baseName)
+ doSuspend := devinfo != nil && devinfo.Exists != 0
+
+ if doSuspend {
+ if err := SuspendDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ task, err := TaskCreateNamed(deviceTargetMsg, poolName)
+ if task == nil {
+ if doSuspend {
+ ResumeDevice(baseName)
+ }
+ return err
+ }
+
+ if err := task.setSector(0); err != nil {
+ if doSuspend {
+ ResumeDevice(baseName)
+ }
+ return fmt.Errorf("devicemapper: Can't set sector %s", err)
+ }
+
+ if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil {
+ if doSuspend {
+ ResumeDevice(baseName)
+ }
+ return fmt.Errorf("devicemapper: Can't set message %s", err)
+ }
+
+ dmSawExist = false // reset before the task is run
+ if err := task.run(); err != nil {
+ if doSuspend {
+ ResumeDevice(baseName)
+ }
+ // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id.
+ if dmSawExist {
+ return ErrDeviceIDExists
+ }
+
+ return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err)
+
+ }
+
+ if doSuspend {
+ if err := ResumeDevice(baseName); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
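
Thin-pool device IDs are chosen by the caller, which is why CreateDevice and CreateSnapDevice surface ErrDeviceIDExists instead of folding it into a generic error. A hedged sketch of the retry loop the doc comments anticipate; the pool name, bounds, and the helper itself are illustrative, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/devicemapper"
)

// nextFreeDeviceID is a hypothetical helper: it probes candidate IDs and
// relies on ErrDeviceIDExists to detect collisions in the pool.
func nextFreeDeviceID(poolName string, start, max int) (int, error) {
	for id := start; id < max; id++ {
		err := devicemapper.CreateDevice(poolName, id)
		if err == nil {
			return id, nil
		}
		if err == devicemapper.ErrDeviceIDExists {
			continue // ID already taken; try the next one
		}
		return 0, err // any other failure is fatal
	}
	return 0, fmt.Errorf("no free device id in [%d, %d)", start, max)
}

func main() {
	// Thin-pool device IDs are 24-bit, hence the upper bound.
	id, err := nextFreeDeviceID("docker-thinpool", 0, 1<<24)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("created device with id", id)
}
```
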
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go
new file mode 100644
index 00000000000..8477e36fec8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go
@@ -0,0 +1,35 @@
+// +build linux
+
+package devicemapper
+
+import "C"
+
+import (
+ "strings"
+)
+
+// Due to the way cgo works this has to be in a separate file, as devmapper.go has
+// definitions in the cgo block, which is incompatible with using "//export"
+
+// DevmapperLogCallback exports the devmapper log callback for cgo.
+//export DevmapperLogCallback
+func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) {
+ msg := C.GoString(message)
+ if level < 7 {
+ if strings.Contains(msg, "busy") {
+ dmSawBusy = true
+ }
+
+ if strings.Contains(msg, "File exists") {
+ dmSawExist = true
+ }
+
+ if strings.Contains(msg, "No such device or address") {
+ dmSawEnxio = true
+ }
+ }
+
+ if dmLogger != nil {
+ dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg)
+ }
+}
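
The split-file constraint noted above is general cgo behavior: a file containing //export may only declare C functions in its preamble, never define them, so the definitions go in a sibling file. A minimal sketch of the exported side, mirroring the devmapper.go/devmapper_log.go split (package and names illustrative):

```go
// +build linux

package devmapperexample

import "C"

import "fmt"

// GoLogCallback is callable from C. The C code that invokes it (and any
// other C definitions) must live in a different file's cgo preamble.
//export GoLogCallback
func GoLogCallback(level C.int, message *C.char) {
	fmt.Printf("level=%d msg=%s\n", int(level), C.GoString(message))
}
```
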
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go
new file mode 100644
index 00000000000..91fbc85b3a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go
@@ -0,0 +1,251 @@
+// +build linux
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+#include <linux/fs.h>   // FIXME: present only for BLKGETSIZE64, maybe we can remove it?
+
+// FIXME: Can't we find a way to do the logging in pure Go?
+extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str);
+
+static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...)
+{
+ char buffer[256];
+ va_list ap;
+
+ va_start(ap, f);
+ vsnprintf(buffer, 256, f, ap);
+ va_end(ap);
+
+ DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer);
+}
+
+static void log_with_errno_init()
+{
+ dm_log_with_errno_init(log_cb);
+}
+*/
+import "C"
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type (
+ cdmTask C.struct_dm_task
+)
+
+// IOCTL consts
+const (
+ BlkGetSize64 = C.BLKGETSIZE64
+ BlkDiscard = C.BLKDISCARD
+)
+
+// Devicemapper cookie flags.
+const (
+ DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG
+ DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG
+ DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG
+ DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK
+)
+
+// DeviceMapper mapped functions.
+var (
+ DmGetLibraryVersion = dmGetLibraryVersionFct
+ DmGetNextTarget = dmGetNextTargetFct
+ DmLogInitVerbose = dmLogInitVerboseFct
+ DmSetDevDir = dmSetDevDirFct
+ DmTaskAddTarget = dmTaskAddTargetFct
+ DmTaskCreate = dmTaskCreateFct
+ DmTaskDestroy = dmTaskDestroyFct
+ DmTaskGetDeps = dmTaskGetDepsFct
+ DmTaskGetInfo = dmTaskGetInfoFct
+ DmTaskGetDriverVersion = dmTaskGetDriverVersionFct
+ DmTaskRun = dmTaskRunFct
+ DmTaskSetAddNode = dmTaskSetAddNodeFct
+ DmTaskSetCookie = dmTaskSetCookieFct
+ DmTaskSetMessage = dmTaskSetMessageFct
+ DmTaskSetName = dmTaskSetNameFct
+ DmTaskSetRo = dmTaskSetRoFct
+ DmTaskSetSector = dmTaskSetSectorFct
+ DmUdevWait = dmUdevWaitFct
+ DmUdevSetSyncSupport = dmUdevSetSyncSupportFct
+ DmUdevGetSyncSupport = dmUdevGetSyncSupportFct
+ DmCookieSupported = dmCookieSupportedFct
+ LogWithErrnoInit = logWithErrnoInitFct
+ DmTaskDeferredRemove = dmTaskDeferredRemoveFct
+ DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct
+)
+
+func free(p *C.char) {
+ C.free(unsafe.Pointer(p))
+}
+
+func dmTaskDestroyFct(task *cdmTask) {
+ C.dm_task_destroy((*C.struct_dm_task)(task))
+}
+
+func dmTaskCreateFct(taskType int) *cdmTask {
+ return (*cdmTask)(C.dm_task_create(C.int(taskType)))
+}
+
+func dmTaskRunFct(task *cdmTask) int {
+ ret, _ := C.dm_task_run((*C.struct_dm_task)(task))
+ return int(ret)
+}
+
+func dmTaskSetNameFct(task *cdmTask, name string) int {
+ Cname := C.CString(name)
+ defer free(Cname)
+
+ return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname))
+}
+
+func dmTaskSetMessageFct(task *cdmTask, message string) int {
+ Cmessage := C.CString(message)
+ defer free(Cmessage)
+
+ return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage))
+}
+
+func dmTaskSetSectorFct(task *cdmTask, sector uint64) int {
+ return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector)))
+}
+
+func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int {
+ cCookie := C.uint32_t(*cookie)
+ defer func() {
+ *cookie = uint(cCookie)
+ }()
+ return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags)))
+}
+
+func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int {
+ return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode)))
+}
+
+func dmTaskSetRoFct(task *cdmTask) int {
+ return int(C.dm_task_set_ro((*C.struct_dm_task)(task)))
+}
+
+func dmTaskAddTargetFct(task *cdmTask,
+ start, size uint64, ttype, params string) int {
+
+ Cttype := C.CString(ttype)
+ defer free(Cttype)
+
+ Cparams := C.CString(params)
+ defer free(Cparams)
+
+ return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams))
+}
+
+func dmTaskGetDepsFct(task *cdmTask) *Deps {
+ Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task))
+ if Cdeps == nil {
+ return nil
+ }
+
+ // golang issue: https://github.com/golang/go/issues/11925
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
+ Len: int(Cdeps.count),
+ Cap: int(Cdeps.count),
+ }
+ devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
+
+ deps := &Deps{
+ Count: uint32(Cdeps.count),
+ Filler: uint32(Cdeps.filler),
+ }
+ for _, device := range devices {
+ deps.Device = append(deps.Device, uint64(device))
+ }
+ return deps
+}
+
+func dmTaskGetInfoFct(task *cdmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
+
+func dmTaskGetDriverVersionFct(task *cdmTask) string {
+ buffer := C.malloc(128)
+ defer C.free(buffer)
+ res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128)
+ if res == 0 {
+ return ""
+ }
+ return C.GoString((*C.char)(buffer))
+}
+
+func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer {
+ var (
+ Cstart, Clength C.uint64_t
+ CtargetType, Cparams *C.char
+ )
+ defer func() {
+ *start = uint64(Cstart)
+ *length = uint64(Clength)
+ *target = C.GoString(CtargetType)
+ *params = C.GoString(Cparams)
+ }()
+
+ nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams)
+ return nextp
+}
+
+func dmUdevSetSyncSupportFct(syncWithUdev int) {
+ (C.dm_udev_set_sync_support(C.int(syncWithUdev)))
+}
+
+func dmUdevGetSyncSupportFct() int {
+ return int(C.dm_udev_get_sync_support())
+}
+
+func dmUdevWaitFct(cookie uint) int {
+ return int(C.dm_udev_wait(C.uint32_t(cookie)))
+}
+
+func dmCookieSupportedFct() int {
+ return int(C.dm_cookie_supported())
+}
+
+func dmLogInitVerboseFct(level int) {
+ C.dm_log_init_verbose(C.int(level))
+}
+
+func logWithErrnoInitFct() {
+ C.log_with_errno_init()
+}
+
+func dmSetDevDirFct(dir string) int {
+ Cdir := C.CString(dir)
+ defer free(Cdir)
+
+ return int(C.dm_set_dev_dir(Cdir))
+}
+
+func dmGetLibraryVersionFct(version *string) int {
+ buffer := C.CString(string(make([]byte, 128)))
+ defer free(buffer)
+ defer func() {
+ *version = C.GoString(buffer)
+ }()
+ return int(C.dm_get_library_version(buffer, 128))
+}
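
dmTaskGetDepsFct above overlays a Go slice header on the memory just past a C struct to read its trailing array (the workaround tracked in golang/go#11925). The same technique in isolation, with an illustrative C array standing in for dm_deps:

```go
package main

/*
#include <stdint.h>
static uint64_t values[4] = {10, 20, 30, 40};
static uint64_t *get_values() { return values; }
*/
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	p := C.get_values()

	// Overlay a slice header on C-owned memory: no copy is made, so the
	// slice is only valid while the C allocation stays alive.
	hdr := reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(p)),
		Len:  4,
		Cap:  4,
	}
	cvals := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))

	// Copy into Go-managed memory before the C pointer goes out of scope.
	out := make([]uint64, 0, len(cvals))
	for _, v := range cvals {
		out = append(out, uint64(v))
	}
	fmt.Println(out) // [10 20 30 40]
}
```
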
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
new file mode 100644
index 00000000000..dc361eab765
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_deferred_remove.go
@@ -0,0 +1,34 @@
+// +build linux,!libdm_no_deferred_remove
+
+package devicemapper
+
+/*
+#cgo LDFLAGS: -L. -ldevmapper
+#include <libdevmapper.h>
+*/
+import "C"
+
+// LibraryDeferredRemovalSupport is supported when statically linked.
+const LibraryDeferredRemovalSupport = true
+
+func dmTaskDeferredRemoveFct(task *cdmTask) int {
+ return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task)))
+}
+
+func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
+ Cinfo := C.struct_dm_info{}
+ defer func() {
+ info.Exists = int(Cinfo.exists)
+ info.Suspended = int(Cinfo.suspended)
+ info.LiveTable = int(Cinfo.live_table)
+ info.InactiveTable = int(Cinfo.inactive_table)
+ info.OpenCount = int32(Cinfo.open_count)
+ info.EventNr = uint32(Cinfo.event_nr)
+ info.Major = uint32(Cinfo.major)
+ info.Minor = uint32(Cinfo.minor)
+ info.ReadOnly = int(Cinfo.read_only)
+ info.TargetCount = int32(Cinfo.target_count)
+ info.DeferredRemove = int(Cinfo.deferred_remove)
+ }()
+ return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo))
+}
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
new file mode 100644
index 00000000000..4a6665de860
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
@@ -0,0 +1,15 @@
+// +build linux,libdm_no_deferred_remove
+
+package devicemapper
+
+// LibraryDeferredRemovalSupport is not supported when statically linked.
+const LibraryDeferredRemovalSupport = false
+
+func dmTaskDeferredRemoveFct(task *cdmTask) int {
+ // Error. Nobody should be calling it.
+ return -1
+}
+
+func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go
new file mode 100644
index 00000000000..581b57eb86f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go
@@ -0,0 +1,27 @@
+// +build linux
+
+package devicemapper
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func ioctlBlkGetSize64(fd uintptr) (int64, error) {
+ var size int64
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 {
+ return 0, err
+ }
+ return size, nil
+}
+
+func ioctlBlkDiscard(fd uintptr, offset, length uint64) error {
+ var r [2]uint64
+ r[0] = offset
+ r[1] = length
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/log.go b/vendor/github.com/docker/docker/pkg/devicemapper/log.go
new file mode 100644
index 00000000000..cee5e545498
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/devicemapper/log.go
@@ -0,0 +1,11 @@
+package devicemapper
+
+// definitions from lvm2 lib/log/log.h
+const (
+ LogLevelFatal = 2 + iota // _LOG_FATAL
+ LogLevelErr // _LOG_ERR
+ LogLevelWarn // _LOG_WARN
+ LogLevelNotice // _LOG_NOTICE
+ LogLevelInfo // _LOG_INFO
+ LogLevelDebug // _LOG_DEBUG
+)
diff --git a/vendor/github.com/docker/docker/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go
new file mode 100644
index 00000000000..1715ef45d9b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/directory/directory.go
@@ -0,0 +1,26 @@
+package directory
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path
+func MoveToSubdir(oldpath, subdir string) error {
+
+ infos, err := ioutil.ReadDir(oldpath)
+ if err != nil {
+ return err
+ }
+ for _, info := range infos {
+ if info.Name() != subdir {
+ oldName := filepath.Join(oldpath, info.Name())
+ newName := filepath.Join(oldpath, subdir, info.Name())
+ if err := os.Rename(oldName, newName); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_test.go b/vendor/github.com/docker/docker/pkg/directory/directory_test.go
new file mode 100644
index 00000000000..2b7a4657be1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/directory/directory_test.go
@@ -0,0 +1,192 @@
+package directory
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+// Size of an empty directory should be 0
+func TestSizeEmpty(t *testing.T) {
+ var dir string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+
+ var size int64
+ if size, _ = Size(dir); size != 0 {
+ t.Fatalf("empty directory has size: %d", size)
+ }
+}
+
+// Size of a directory with one empty file should be 0
+func TestSizeEmptyFile(t *testing.T) {
+ var dir string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+
+ var file *os.File
+ if file, err = ioutil.TempFile(dir, "file"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ var size int64
+ if size, _ = Size(file.Name()); size != 0 {
+ t.Fatalf("directory with one file has size: %d", size)
+ }
+}
+
+// Size of a directory with one 5-byte file should be 5
+func TestSizeNonemptyFile(t *testing.T) {
+ var dir string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+
+ var file *os.File
+ if file, err = ioutil.TempFile(dir, "file"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ d := []byte{97, 98, 99, 100, 101}
+ file.Write(d)
+
+ var size int64
+ if size, _ = Size(file.Name()); size != 5 {
+ t.Fatalf("directory with one 5-byte file has size: %d", size)
+ }
+}
+
+// Size of a directory with one empty directory should be 0
+func TestSizeNestedDirectoryEmpty(t *testing.T) {
+ var dir string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+ if dir, err = ioutil.TempDir(dir, "nested"); err != nil {
+ t.Fatalf("failed to create nested directory: %s", err)
+ }
+
+ var size int64
+ if size, _ = Size(dir); size != 0 {
+ t.Fatalf("directory with one empty directory has size: %d", size)
+ }
+}
+
+// Test directory with 1 file and 1 empty directory
+func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) {
+ var dir string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+ if dir, err = ioutil.TempDir(dir, "nested"); err != nil {
+ t.Fatalf("failed to create nested directory: %s", err)
+ }
+
+ var file *os.File
+ if file, err = ioutil.TempFile(dir, "file"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ d := []byte{100, 111, 99, 107, 101, 114}
+ file.Write(d)
+
+ var size int64
+ if size, _ = Size(dir); size != 6 {
+ t.Fatalf("directory with 6-byte file and empty directory has size: %d", size)
+ }
+}
+
+// Test directory with 1 file and 1 non-empty directory
+func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) {
+ var dir, dirNested string
+ var err error
+ if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil {
+ t.Fatalf("failed to create directory: %s", err)
+ }
+ if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil {
+ t.Fatalf("failed to create nested directory: %s", err)
+ }
+
+ var file *os.File
+ if file, err = ioutil.TempFile(dir, "file"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ data := []byte{100, 111, 99, 107, 101, 114}
+ file.Write(data)
+
+ var nestedFile *os.File
+ if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil {
+ t.Fatalf("failed to create file in nested directory: %s", err)
+ }
+
+ nestedData := []byte{100, 111, 99, 107, 101, 114}
+ nestedFile.Write(nestedData)
+
+ var size int64
+ if size, _ = Size(dir); size != 12 {
+ t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size)
+ }
+}
+
+// Test migration of directory to a subdir underneath itself
+func TestMoveToSubdir(t *testing.T) {
+ var outerDir, subDir string
+ var err error
+
+ if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil {
+ t.Fatalf("failed to create directory: %v", err)
+ }
+
+ if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil {
+ t.Fatalf("failed to create subdirectory: %v", err)
+ }
+
+ // write 4 temp files in the outer dir to get moved
+ filesList := []string{"a", "b", "c", "d"}
+ for _, fName := range filesList {
+ if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil {
+ t.Fatalf("couldn't create temp file %q: %v", fName, err)
+ } else {
+ file.WriteString(fName)
+ file.Close()
+ }
+ }
+
+ if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil {
+ t.Fatalf("Error during migration of content to subdirectory: %v", err)
+ }
+ // validate that the files were moved to the subdirectory
+ infos, err := ioutil.ReadDir(subDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(infos) != 4 {
+ t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos))
+ }
+ var results []string
+ for _, info := range infos {
+ results = append(results, info.Name())
+ }
+ sort.Sort(sort.StringSlice(results))
+ if !reflect.DeepEqual(filesList, results) {
+ t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results)
+ }
+}
+
+// Test a non-existing directory
+func TestSizeNonExistingDirectory(t *testing.T) {
+ if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil {
+ t.Fatalf("error is expected")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go
new file mode 100644
index 00000000000..397251bdb8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go
@@ -0,0 +1,48 @@
+// +build linux freebsd solaris
+
+package directory
+
+import (
+ "os"
+ "path/filepath"
+ "syscall"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+ data := make(map[uint64]struct{})
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ // if dir does not exist, Size() returns the error.
+ // if dir/x disappeared while walking, Size() ignores dir/x.
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+ // Check inode to handle hard links correctly
+ inode := fileInfo.Sys().(*syscall.Stat_t).Ino
+ // inode is not a uint64 on all platforms. Cast it to avoid issues.
+ if _, exists := data[uint64(inode)]; exists {
+ return nil
+ }
+ data[uint64(inode)] = struct{}{}
+
+ size += s
+
+ return nil
+ })
+ return
+}
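
For reference, a minimal sketch of calling `Size` from the vendored package; the target path below is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/directory"
)

func main() {
	// On Unix, hard-linked files are counted once because Size
	// deduplicates regular files by inode.
	size, err := directory.Size("/var/lib/docker/volumes")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total: %d bytes\n", size)
}
```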
diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go
new file mode 100644
index 00000000000..6fb0917c4c7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package directory
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// Size walks a directory tree and returns its total size in bytes.
+func Size(dir string) (size int64, err error) {
+ err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error {
+ if err != nil {
+ // if dir does not exist, Size() returns the error.
+ // if dir/x disappeared while walking, Size() ignores dir/x.
+ if os.IsNotExist(err) && d != dir {
+ return nil
+ }
+ return err
+ }
+
+ // Ignore directory sizes
+ if fileInfo == nil {
+ return nil
+ }
+
+ s := fileInfo.Size()
+ if fileInfo.IsDir() || s == 0 {
+ return nil
+ }
+
+ size += s
+
+ return nil
+ })
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/README.md b/vendor/github.com/docker/docker/pkg/discovery/README.md
new file mode 100644
index 00000000000..39777c2171d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/README.md
@@ -0,0 +1,41 @@
+---
+page_title: Docker discovery
+page_description: discovery
+page_keywords: docker, clustering, discovery
+---
+
+# Discovery
+
+Docker comes with multiple Discovery backends.
+
+## Backends
+
+### Using etcd
+
+Point your Docker Engine instances to a common etcd instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store etcd://<etcd_ip1>,<etcd_ip2>/<path>
+```
+
+### Using consul
+
+Point your Docker Engine instances to a common Consul instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store consul://<consul_ip>/<path>
+```
+
+### Using zookeeper
+
+Point your Docker Engine instances to a common Zookeeper instance. You can specify
+the address Docker uses to advertise the node using the `--cluster-advertise`
+flag.
+
+```bash
+$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store zk://<zk_addr1>,<zk_addr2>/<path>
+```
diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go
new file mode 100644
index 00000000000..edfa4fd3a8c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/backends.go
@@ -0,0 +1,107 @@
+package discovery
+
+import (
+ "fmt"
+ "net"
+ "strings"
+ "time"
+
+ log "github.com/Sirupsen/logrus"
+)
+
+var (
+ // Backends is a global map of discovery backends indexed by their
+ // associated scheme.
+ backends = make(map[string]Backend)
+)
+
+// Register makes a discovery backend available by the provided scheme.
+// If Register is called twice with the same scheme an error is returned.
+func Register(scheme string, d Backend) error {
+ if _, exists := backends[scheme]; exists {
+ return fmt.Errorf("scheme already registered %s", scheme)
+ }
+ log.WithField("name", scheme).Debug("Registering discovery service")
+ backends[scheme] = d
+ return nil
+}
+
+func parse(rawurl string) (string, string) {
+ parts := strings.SplitN(rawurl, "://", 2)
+
+	// node1:port,node2:port => nodes://node1:port,node2:port
+ if len(parts) == 1 {
+ return "nodes", parts[0]
+ }
+ return parts[0], parts[1]
+}
+
+// ParseAdvertise parses the --cluster-advertise daemon config which accepts
+// <ip-address>:<port> or <interface-name>:<port>
+func ParseAdvertise(advertise string) (string, error) {
+ var (
+ iface *net.Interface
+ addrs []net.Addr
+ err error
+ )
+
+ addr, port, err := net.SplitHostPort(advertise)
+
+ if err != nil {
+ return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err)
+ }
+
+ ip := net.ParseIP(addr)
+ // If it is a valid ip-address, use it as is
+ if ip != nil {
+ return advertise, nil
+ }
+
+ // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise
+ ifaceName := addr
+ iface, err = net.InterfaceByName(ifaceName)
+ if err != nil {
+ return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err)
+ }
+
+ addrs, err = iface.Addrs()
+ if err != nil {
+ return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err)
+ }
+
+	if len(addrs) == 0 {
+ return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise)
+ }
+
+ addr = ""
+ for _, a := range addrs {
+ ip, _, err := net.ParseCIDR(a.String())
+ if err != nil {
+ return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err)
+ }
+ if ip.To4() == nil || ip.IsLoopback() {
+ continue
+ }
+ addr = ip.String()
+ break
+ }
+ if addr == "" {
+ return "", fmt.Errorf("couldnt find a valid ip-address in interface %s", advertise)
+ }
+
+ addr = net.JoinHostPort(addr, port)
+ return addr, nil
+}
+
+// New returns a new Discovery given a URL, heartbeat and ttl settings.
+// Returns an error if the URL scheme is not supported.
+func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) {
+ scheme, uri := parse(rawurl)
+ if backend, exists := backends[scheme]; exists {
+ log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service")
+ err := backend.Initialize(uri, heartbeat, ttl, clusterOpts)
+ return backend, err
+ }
+
+ return nil, ErrNotSupported
+}
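
A minimal sketch of these helpers in use, assuming the vendored import paths; the addresses are placeholders:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/docker/pkg/discovery"
	// Imported for its init() side effect: registers the "nodes" scheme.
	_ "github.com/docker/docker/pkg/discovery/nodes"
)

func main() {
	// A literal IP passes through unchanged; an interface name like
	// "eth0:2376" would be resolved to that interface's IPv4 address.
	addr, err := discovery.ParseAdvertise("192.168.1.10:2376")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("advertising on", addr)

	// A scheme-less address list falls back to the "nodes" backend.
	backend, err := discovery.New("1.1.1.1:2376,2.2.2.2:2376", 10*time.Second, 30*time.Second, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("backend: %T\n", backend)
}
```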
diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go
new file mode 100644
index 00000000000..ca7f587458e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/discovery.go
@@ -0,0 +1,35 @@
+package discovery
+
+import (
+ "errors"
+ "time"
+)
+
+var (
+ // ErrNotSupported is returned when a discovery service is not supported.
+ ErrNotSupported = errors.New("discovery service not supported")
+
+ // ErrNotImplemented is returned when discovery feature is not implemented
+ // by discovery backend.
+ ErrNotImplemented = errors.New("not implemented in this discovery service")
+)
+
+// Watcher provides watching over a cluster for nodes joining and leaving.
+type Watcher interface {
+ // Watch the discovery for entry changes.
+ // Returns a channel that will receive changes or an error.
+ // Providing a non-nil stopCh can be used to stop watching.
+ Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error)
+}
+
+// Backend is implemented by discovery backends which manage cluster entries.
+type Backend interface {
+ // Watcher must be provided by every backend.
+ Watcher
+
+ // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings.
+ Initialize(string, time.Duration, time.Duration, map[string]string) error
+
+ // Register to the discovery.
+ Register(string) error
+}
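
To illustrate the contract, a hypothetical backend that serves a fixed entry list; the package and the `static` scheme are invented for this sketch, mirroring the shape of the real backends that follow:

```go
package static

import (
	"strings"
	"time"

	"github.com/docker/docker/pkg/discovery"
)

// Discovery serves a fixed set of entries parsed at Initialize time.
type Discovery struct {
	entries discovery.Entries
}

func init() {
	// Make the backend available as "static://host:port,host:port".
	discovery.Register("static", &Discovery{})
}

// Initialize parses the comma-separated host:port list.
func (d *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error {
	entries, err := discovery.CreateEntries(strings.Split(uris, ","))
	if err != nil {
		return err
	}
	d.entries = entries
	return nil
}

// Watch emits the fixed entries once and then blocks until stopped.
func (d *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
	ch := make(chan discovery.Entries)
	go func() {
		defer close(ch)
		ch <- d.entries
		<-stopCh
	}()
	return ch, nil
}

// Register is a no-op for a static backend.
func (d *Discovery) Register(string) error {
	return discovery.ErrNotImplemented
}
```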
diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go
new file mode 100644
index 00000000000..6084f3ef0d0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/discovery_test.go
@@ -0,0 +1,137 @@
+package discovery
+
+import (
+ "testing"
+
+ "github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type DiscoverySuite struct{}
+
+var _ = check.Suite(&DiscoverySuite{})
+
+func (s *DiscoverySuite) TestNewEntry(c *check.C) {
+ entry, err := NewEntry("127.0.0.1:2375")
+ c.Assert(err, check.IsNil)
+ c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true)
+ c.Assert(entry.String(), check.Equals, "127.0.0.1:2375")
+
+ entry, err = NewEntry("[2001:db8:0:f101::2]:2375")
+ c.Assert(err, check.IsNil)
+ c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true)
+ c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375")
+
+ _, err = NewEntry("127.0.0.1")
+ c.Assert(err, check.NotNil)
+}
+
+func (s *DiscoverySuite) TestParse(c *check.C) {
+ scheme, uri := parse("127.0.0.1:2375")
+ c.Assert(scheme, check.Equals, "nodes")
+ c.Assert(uri, check.Equals, "127.0.0.1:2375")
+
+ scheme, uri = parse("localhost:2375")
+ c.Assert(scheme, check.Equals, "nodes")
+ c.Assert(uri, check.Equals, "localhost:2375")
+
+ scheme, uri = parse("scheme://127.0.0.1:2375")
+ c.Assert(scheme, check.Equals, "scheme")
+ c.Assert(uri, check.Equals, "127.0.0.1:2375")
+
+ scheme, uri = parse("scheme://localhost:2375")
+ c.Assert(scheme, check.Equals, "scheme")
+ c.Assert(uri, check.Equals, "localhost:2375")
+
+ scheme, uri = parse("")
+ c.Assert(scheme, check.Equals, "nodes")
+ c.Assert(uri, check.Equals, "")
+}
+
+func (s *DiscoverySuite) TestCreateEntries(c *check.C) {
+ entries, err := CreateEntries(nil)
+ c.Assert(entries, check.DeepEquals, Entries{})
+ c.Assert(err, check.IsNil)
+
+ entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""})
+ c.Assert(err, check.IsNil)
+ expected := Entries{
+ &Entry{Host: "127.0.0.1", Port: "2375"},
+ &Entry{Host: "127.0.0.2", Port: "2375"},
+ &Entry{Host: "2001:db8:0:f101::2", Port: "2375"},
+ }
+ c.Assert(entries.Equals(expected), check.Equals, true)
+
+ _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"})
+ c.Assert(err, check.NotNil)
+}
+
+func (s *DiscoverySuite) TestContainsEntry(c *check.C) {
+ entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""})
+ c.Assert(err, check.IsNil)
+ c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true)
+ c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false)
+}
+
+func (s *DiscoverySuite) TestEntriesEquality(c *check.C) {
+ entries := Entries{
+ &Entry{Host: "127.0.0.1", Port: "2375"},
+ &Entry{Host: "127.0.0.2", Port: "2375"},
+ }
+
+ // Same
+ c.Assert(entries.Equals(Entries{
+ &Entry{Host: "127.0.0.1", Port: "2375"},
+ &Entry{Host: "127.0.0.2", Port: "2375"},
+	}), check.Equals, true)
+
+ // Different size
+ c.Assert(entries.Equals(Entries{
+ &Entry{Host: "127.0.0.1", Port: "2375"},
+ &Entry{Host: "127.0.0.2", Port: "2375"},
+ &Entry{Host: "127.0.0.3", Port: "2375"},
+	}), check.Equals, false)
+
+ // Different content
+ c.Assert(entries.Equals(Entries{
+ &Entry{Host: "127.0.0.1", Port: "2375"},
+ &Entry{Host: "127.0.0.42", Port: "2375"},
+	}), check.Equals, false)
+}
+
+func (s *DiscoverySuite) TestEntriesDiff(c *check.C) {
+ entry1 := &Entry{Host: "1.1.1.1", Port: "1111"}
+ entry2 := &Entry{Host: "2.2.2.2", Port: "2222"}
+ entry3 := &Entry{Host: "3.3.3.3", Port: "3333"}
+ entries := Entries{entry1, entry2}
+
+ // No diff
+ added, removed := entries.Diff(Entries{entry2, entry1})
+ c.Assert(added, check.HasLen, 0)
+ c.Assert(removed, check.HasLen, 0)
+
+ // Add
+ added, removed = entries.Diff(Entries{entry2, entry3, entry1})
+ c.Assert(added, check.HasLen, 1)
+ c.Assert(added.Contains(entry3), check.Equals, true)
+ c.Assert(removed, check.HasLen, 0)
+
+ // Remove
+ added, removed = entries.Diff(Entries{entry2})
+ c.Assert(added, check.HasLen, 0)
+ c.Assert(removed, check.HasLen, 1)
+ c.Assert(removed.Contains(entry1), check.Equals, true)
+
+ // Add and remove
+ added, removed = entries.Diff(Entries{entry1, entry3})
+ c.Assert(added, check.HasLen, 1)
+ c.Assert(added.Contains(entry3), check.Equals, true)
+ c.Assert(removed, check.HasLen, 1)
+ c.Assert(removed.Contains(entry2), check.Equals, true)
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go
new file mode 100644
index 00000000000..ce23bbf89bd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/entry.go
@@ -0,0 +1,94 @@
+package discovery
+
+import "net"
+
+// NewEntry creates a new entry.
+func NewEntry(url string) (*Entry, error) {
+ host, port, err := net.SplitHostPort(url)
+ if err != nil {
+ return nil, err
+ }
+ return &Entry{host, port}, nil
+}
+
+// An Entry represents a host.
+type Entry struct {
+ Host string
+ Port string
+}
+
+// Equals returns true if cmp contains the same data.
+func (e *Entry) Equals(cmp *Entry) bool {
+ return e.Host == cmp.Host && e.Port == cmp.Port
+}
+
+// String returns the string form of an entry.
+func (e *Entry) String() string {
+ return net.JoinHostPort(e.Host, e.Port)
+}
+
+// Entries is a list of *Entry with some helpers.
+type Entries []*Entry
+
+// Equals returns true if cmp contains the same data.
+func (e Entries) Equals(cmp Entries) bool {
+ if len(e) != len(cmp) {
+ return false
+ }
+ for i := range e {
+ if !e[i].Equals(cmp[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Contains returns true if the Entries contain a given Entry.
+func (e Entries) Contains(entry *Entry) bool {
+ for _, curr := range e {
+ if curr.Equals(entry) {
+ return true
+ }
+ }
+ return false
+}
+
+// Diff compares two entries and returns the added and removed entries.
+func (e Entries) Diff(cmp Entries) (Entries, Entries) {
+ added := Entries{}
+ for _, entry := range cmp {
+ if !e.Contains(entry) {
+ added = append(added, entry)
+ }
+ }
+
+ removed := Entries{}
+ for _, entry := range e {
+ if !cmp.Contains(entry) {
+ removed = append(removed, entry)
+ }
+ }
+
+ return added, removed
+}
+
+// CreateEntries returns an array of entries based on the given addresses.
+func CreateEntries(addrs []string) (Entries, error) {
+ entries := Entries{}
+ if addrs == nil {
+ return entries, nil
+ }
+
+ for _, addr := range addrs {
+ if len(addr) == 0 {
+ continue
+ }
+ entry, err := NewEntry(addr)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, entry)
+ }
+ return entries, nil
+}
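
A short sketch of the `Entries` helpers in use; the addresses are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/discovery"
)

func main() {
	before, err := discovery.CreateEntries([]string{"1.1.1.1:2375", "2.2.2.2:2375"})
	if err != nil {
		log.Fatal(err)
	}
	after, err := discovery.CreateEntries([]string{"2.2.2.2:2375", "3.3.3.3:2375"})
	if err != nil {
		log.Fatal(err)
	}

	// Diff reports what joined and what left between two snapshots.
	added, removed := before.Diff(after)
	fmt.Println(added, removed) // [3.3.3.3:2375] [1.1.1.1:2375]
}
```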
diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file.go b/vendor/github.com/docker/docker/pkg/discovery/file/file.go
new file mode 100644
index 00000000000..b4f870b8640
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/file/file.go
@@ -0,0 +1,109 @@
+package file
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+ heartbeat time.Duration
+ path string
+}
+
+func init() {
+ Init()
+}
+
+// Init is exported
+func Init() {
+ discovery.Register("file", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error {
+ s.path = path
+ s.heartbeat = heartbeat
+ return nil
+}
+
+func parseFileContent(content []byte) []string {
+ var result []string
+ for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
+ line = strings.TrimSpace(line)
+		// Ignore lines that start with #
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
+		// Inline # comments are also ignored.
+ if strings.Contains(line, "#") {
+ line = line[0:strings.Index(line, "#")]
+ // Trim additional spaces caused by above stripping.
+ line = strings.TrimSpace(line)
+ }
+ for _, ip := range discovery.Generate(line) {
+ result = append(result, ip)
+ }
+ }
+ return result
+}
+
+func (s *Discovery) fetch() (discovery.Entries, error) {
+ fileContent, err := ioutil.ReadFile(s.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read '%s': %v", s.path, err)
+ }
+ return discovery.CreateEntries(parseFileContent(fileContent))
+}
+
+// Watch is exported
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+ ch := make(chan discovery.Entries)
+ errCh := make(chan error)
+ ticker := time.NewTicker(s.heartbeat)
+
+ go func() {
+ defer close(errCh)
+ defer close(ch)
+
+ // Send the initial entries if available.
+ currentEntries, err := s.fetch()
+ if err != nil {
+ errCh <- err
+ } else {
+ ch <- currentEntries
+ }
+
+ // Periodically send updates.
+ for {
+ select {
+ case <-ticker.C:
+ newEntries, err := s.fetch()
+ if err != nil {
+ errCh <- err
+ continue
+ }
+
+ // Check if the file has really changed.
+ if !newEntries.Equals(currentEntries) {
+ ch <- newEntries
+ }
+ currentEntries = newEntries
+ case <-stopCh:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+
+ return ch, errCh
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+ return discovery.ErrNotImplemented
+}
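
Putting the file backend together, a hedged end-to-end sketch; the file path is hypothetical, and the heartbeat controls how often the file is re-read:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"github.com/docker/docker/pkg/discovery"
	// Imported for its init() side effect: registers the "file" scheme.
	_ "github.com/docker/docker/pkg/discovery/file"
)

func main() {
	// One host per line; # comments and [from:to] ranges are allowed.
	data := []byte("# managers\n1.1.1.1:2375\n2.2.2.[2:4]:2375 # workers\n")
	if err := ioutil.WriteFile("/tmp/cluster", data, 0644); err != nil {
		log.Fatal(err)
	}

	d, err := discovery.New("file:///tmp/cluster", 5*time.Second, 0, nil)
	if err != nil {
		log.Fatal(err)
	}

	stopCh := make(chan struct{})
	ch, errCh := d.Watch(stopCh)
	select {
	case entries := <-ch:
		fmt.Println("entries:", entries)
	case err := <-errCh:
		log.Fatal(err)
	}
	close(stopCh)
}
```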
diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go
new file mode 100644
index 00000000000..667f00ba0d0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/file/file_test.go
@@ -0,0 +1,114 @@
+package file
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/docker/docker/pkg/discovery"
+
+ "github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type DiscoverySuite struct{}
+
+var _ = check.Suite(&DiscoverySuite{})
+
+func (s *DiscoverySuite) TestInitialize(c *check.C) {
+ d := &Discovery{}
+ d.Initialize("/path/to/file", 1000, 0, nil)
+ c.Assert(d.path, check.Equals, "/path/to/file")
+}
+
+func (s *DiscoverySuite) TestNew(c *check.C) {
+ d, err := discovery.New("file:///path/to/file", 0, 0, nil)
+ c.Assert(err, check.IsNil)
+ c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file")
+}
+
+func (s *DiscoverySuite) TestContent(c *check.C) {
+ data := `
+1.1.1.[1:2]:1111
+2.2.2.[2:4]:2222
+`
+ ips := parseFileContent([]byte(data))
+ c.Assert(ips, check.HasLen, 5)
+ c.Assert(ips[0], check.Equals, "1.1.1.1:1111")
+ c.Assert(ips[1], check.Equals, "1.1.1.2:1111")
+ c.Assert(ips[2], check.Equals, "2.2.2.2:2222")
+ c.Assert(ips[3], check.Equals, "2.2.2.3:2222")
+ c.Assert(ips[4], check.Equals, "2.2.2.4:2222")
+}
+
+func (s *DiscoverySuite) TestRegister(c *check.C) {
+ discovery := &Discovery{path: "/path/to/file"}
+ c.Assert(discovery.Register("0.0.0.0"), check.NotNil)
+}
+
+func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) {
+ data := `
+### test ###
+1.1.1.1:1111 # inline comment
+# 2.2.2.2:2222
+ ### empty line with comment
+ 3.3.3.3:3333
+### test ###
+`
+ ips := parseFileContent([]byte(data))
+ c.Assert(ips, check.HasLen, 2)
+ c.Assert("1.1.1.1:1111", check.Equals, ips[0])
+ c.Assert("3.3.3.3:3333", check.Equals, ips[1])
+}
+
+func (s *DiscoverySuite) TestWatch(c *check.C) {
+ data := `
+1.1.1.1:1111
+2.2.2.2:2222
+`
+ expected := discovery.Entries{
+ &discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+ &discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+ }
+
+ // Create a temporary file and remove it.
+ tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test")
+ c.Assert(err, check.IsNil)
+ c.Assert(tmp.Close(), check.IsNil)
+ c.Assert(os.Remove(tmp.Name()), check.IsNil)
+
+ // Set up file discovery.
+ d := &Discovery{}
+ d.Initialize(tmp.Name(), 1000, 0, nil)
+ stopCh := make(chan struct{})
+ ch, errCh := d.Watch(stopCh)
+
+ // Make sure it fires errors since the file doesn't exist.
+ c.Assert(<-errCh, check.NotNil)
+ // We have to drain the error channel otherwise Watch will get stuck.
+ go func() {
+ for range errCh {
+ }
+ }()
+
+ // Write the file and make sure we get the expected value back.
+ c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil)
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ // Add a new entry and look it up.
+ expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
+ f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600)
+ c.Assert(err, check.IsNil)
+ c.Assert(f, check.NotNil)
+ _, err = f.WriteString("\n3.3.3.3:3333\n")
+ c.Assert(err, check.IsNil)
+ f.Close()
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ // Stop and make sure it closes all channels.
+ close(stopCh)
+ c.Assert(<-ch, check.IsNil)
+ c.Assert(<-errCh, check.IsNil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go
new file mode 100644
index 00000000000..d22298298fa
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/generator.go
@@ -0,0 +1,35 @@
+package discovery
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+// Generate takes care of IP generation
+func Generate(pattern string) []string {
+	re := regexp.MustCompile(`\[(.+):(.+)\]`)
+ submatch := re.FindStringSubmatch(pattern)
+ if submatch == nil {
+ return []string{pattern}
+ }
+
+ from, err := strconv.Atoi(submatch[1])
+ if err != nil {
+ return []string{pattern}
+ }
+ to, err := strconv.Atoi(submatch[2])
+ if err != nil {
+ return []string{pattern}
+ }
+
+ template := re.ReplaceAllString(pattern, "%d")
+
+ var result []string
+ for val := from; val <= to; val++ {
+ entry := fmt.Sprintf(template, val)
+ result = append(result, entry)
+ }
+
+ return result
+}
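
For instance, a single `[from:to]` range expands like this, while malformed ranges fall through unchanged:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/discovery"
)

func main() {
	// A well-formed range is expanded into one address per value.
	fmt.Println(discovery.Generate("10.0.0.[1:3]:2375"))
	// [10.0.0.1:2375 10.0.0.2:2375 10.0.0.3:2375]

	// A non-numeric bound returns the input as-is.
	fmt.Println(discovery.Generate("10.0.0.[x:3]:2375"))
	// [10.0.0.[x:3]:2375]
}
```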
diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator_test.go b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go
new file mode 100644
index 00000000000..6281c466652
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/generator_test.go
@@ -0,0 +1,53 @@
+package discovery
+
+import (
+ "github.com/go-check/check"
+)
+
+func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) {
+ ips := Generate("127.0.0.1")
+ c.Assert(len(ips), check.Equals, 1)
+ c.Assert(ips[0], check.Equals, "127.0.0.1")
+}
+
+func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) {
+ ips := Generate("127.0.0.1:8080")
+ c.Assert(len(ips), check.Equals, 1)
+ c.Assert(ips[0], check.Equals, "127.0.0.1:8080")
+}
+
+func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) {
+ ips := Generate("127.0.0.[1]")
+ c.Assert(len(ips), check.Equals, 1)
+ c.Assert(ips[0], check.Equals, "127.0.0.[1]")
+}
+
+func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) {
+ ips := Generate("127.0.0.[1:11]:2375")
+ c.Assert(len(ips), check.Equals, 11)
+ c.Assert(ips[0], check.Equals, "127.0.0.1:2375")
+ c.Assert(ips[1], check.Equals, "127.0.0.2:2375")
+ c.Assert(ips[2], check.Equals, "127.0.0.3:2375")
+ c.Assert(ips[3], check.Equals, "127.0.0.4:2375")
+ c.Assert(ips[4], check.Equals, "127.0.0.5:2375")
+ c.Assert(ips[5], check.Equals, "127.0.0.6:2375")
+ c.Assert(ips[6], check.Equals, "127.0.0.7:2375")
+ c.Assert(ips[7], check.Equals, "127.0.0.8:2375")
+ c.Assert(ips[8], check.Equals, "127.0.0.9:2375")
+ c.Assert(ips[9], check.Equals, "127.0.0.10:2375")
+ c.Assert(ips[10], check.Equals, "127.0.0.11:2375")
+}
+
+func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) {
+ malformedInput := "127.0.0.[x:11]:2375"
+ ips := Generate(malformedInput)
+ c.Assert(len(ips), check.Equals, 1)
+ c.Assert(ips[0], check.Equals, malformedInput)
+}
+
+func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) {
+ malformedInput := "127.0.0.[1:x]:2375"
+ ips := Generate(malformedInput)
+ c.Assert(len(ips), check.Equals, 1)
+ c.Assert(ips[0], check.Equals, malformedInput)
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go
new file mode 100644
index 00000000000..f371c0cba01
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go
@@ -0,0 +1,192 @@
+package kv
+
+import (
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/discovery"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/docker/libkv"
+ "github.com/docker/libkv/store"
+ "github.com/docker/libkv/store/consul"
+ "github.com/docker/libkv/store/etcd"
+ "github.com/docker/libkv/store/zookeeper"
+)
+
+const (
+ defaultDiscoveryPath = "docker/nodes"
+)
+
+// Discovery is exported
+type Discovery struct {
+ backend store.Backend
+ store store.Store
+ heartbeat time.Duration
+ ttl time.Duration
+ prefix string
+ path string
+}
+
+func init() {
+ Init()
+}
+
+// Init is exported
+func Init() {
+ // Register to libkv
+ zookeeper.Register()
+ consul.Register()
+ etcd.Register()
+
+ // Register to internal discovery service
+ discovery.Register("zk", &Discovery{backend: store.ZK})
+ discovery.Register("consul", &Discovery{backend: store.CONSUL})
+ discovery.Register("etcd", &Discovery{backend: store.ETCD})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error {
+ var (
+ parts = strings.SplitN(uris, "/", 2)
+ addrs = strings.Split(parts[0], ",")
+ err error
+ )
+
+ // A custom prefix to the path can be optionally used.
+ if len(parts) == 2 {
+ s.prefix = parts[1]
+ }
+
+ s.heartbeat = heartbeat
+ s.ttl = ttl
+
+ // Use a custom path if specified in discovery options
+ dpath := defaultDiscoveryPath
+ if clusterOpts["kv.path"] != "" {
+ dpath = clusterOpts["kv.path"]
+ }
+
+ s.path = path.Join(s.prefix, dpath)
+
+ var config *store.Config
+ if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" {
+ log.Info("Initializing discovery with TLS")
+ tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
+ CAFile: clusterOpts["kv.cacertfile"],
+ CertFile: clusterOpts["kv.certfile"],
+ KeyFile: clusterOpts["kv.keyfile"],
+ })
+ if err != nil {
+ return err
+ }
+ config = &store.Config{
+ // Set ClientTLS to trigger https (bug in libkv/etcd)
+ ClientTLS: &store.ClientTLSConfig{
+ CACertFile: clusterOpts["kv.cacertfile"],
+ CertFile: clusterOpts["kv.certfile"],
+ KeyFile: clusterOpts["kv.keyfile"],
+ },
+ // The actual TLS config that will be used
+ TLS: tlsConfig,
+ }
+ } else {
+ log.Info("Initializing discovery without TLS")
+ }
+
+	// Create a new store; options not supported by the
+	// chosen store are ignored.
+ s.store, err = libkv.NewStore(s.backend, addrs, config)
+ return err
+}
+
+// Watch the store until either there's a store error or we receive a stop request.
+// Returns false if we shouldn't attempt watching the store anymore (stop request received).
+func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool {
+ for {
+ select {
+ case pairs := <-watchCh:
+ if pairs == nil {
+ return true
+ }
+
+ log.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs))
+
+ // Convert `KVPair` into `discovery.Entry`.
+			addrs := make([]string, 0, len(pairs))
+ for _, pair := range pairs {
+ addrs = append(addrs, string(pair.Value))
+ }
+
+ entries, err := discovery.CreateEntries(addrs)
+ if err != nil {
+ errCh <- err
+ } else {
+ discoveryCh <- entries
+ }
+ case <-stopCh:
+ // We were requested to stop watching.
+ return false
+ }
+ }
+}
+
+// Watch is exported
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+ ch := make(chan discovery.Entries)
+ errCh := make(chan error)
+
+ go func() {
+ defer close(ch)
+ defer close(errCh)
+
+ // Forever: Create a store watch, watch until we get an error and then try again.
+ // Will only stop if we receive a stopCh request.
+ for {
+ // Create the path to watch if it does not exist yet
+ exists, err := s.store.Exists(s.path)
+ if err != nil {
+ errCh <- err
+ }
+ if !exists {
+ if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil {
+ errCh <- err
+ }
+ }
+
+ // Set up a watch.
+ watchCh, err := s.store.WatchTree(s.path, stopCh)
+ if err != nil {
+ errCh <- err
+ } else {
+ if !s.watchOnce(stopCh, watchCh, ch, errCh) {
+ return
+ }
+ }
+
+ // If we get here it means the store watch channel was closed. This
+ // is unexpected so let's retry later.
+ errCh <- fmt.Errorf("Unexpected watch error")
+ time.Sleep(s.heartbeat)
+ }
+ }()
+ return ch, errCh
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+ opts := &store.WriteOptions{TTL: s.ttl}
+ return s.store.Put(path.Join(s.path, addr), []byte(addr), opts)
+}
+
+// Store returns the underlying store used by KV discovery.
+func (s *Discovery) Store() store.Store {
+ return s.store
+}
+
+// Prefix returns the store prefix
+func (s *Discovery) Prefix() string {
+ return s.prefix
+}
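
A sketch of wiring up KV discovery, assuming a reachable etcd endpoint; the TLS file paths and the `custom/prefix` prefix are placeholders:

```go
package main

import (
	"log"
	"time"

	"github.com/docker/docker/pkg/discovery"
	// Imported for its init() side effect: registers "consul", "etcd" and "zk".
	_ "github.com/docker/docker/pkg/discovery/kv"
)

func main() {
	// All three TLS options must be set together to enable TLS.
	opts := map[string]string{
		"kv.cacertfile": "/etc/docker/ca.pem",
		"kv.certfile":   "/etc/docker/cert.pem",
		"kv.keyfile":    "/etc/docker/key.pem",
	}

	// Everything after the first "/" becomes the discovery path prefix.
	d, err := discovery.New("etcd://127.0.0.1:2379/custom/prefix", 20*time.Second, 60*time.Second, opts)
	if err != nil {
		log.Fatal(err)
	}

	// Register advertises this node under the discovery path with the TTL above.
	if err := d.Register("192.168.1.10:2376"); err != nil {
		log.Fatal(err)
	}
}
```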
diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go
new file mode 100644
index 00000000000..dab3939dd04
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv_test.go
@@ -0,0 +1,324 @@
+package kv
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/discovery"
+ "github.com/docker/libkv"
+ "github.com/docker/libkv/store"
+
+ "github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type DiscoverySuite struct{}
+
+var _ = check.Suite(&DiscoverySuite{})
+
+func (ds *DiscoverySuite) TestInitialize(c *check.C) {
+ storeMock := &FakeStore{
+ Endpoints: []string{"127.0.0.1"},
+ }
+ d := &Discovery{backend: store.CONSUL}
+ d.Initialize("127.0.0.1", 0, 0, nil)
+ d.store = storeMock
+
+ s := d.store.(*FakeStore)
+ c.Assert(s.Endpoints, check.HasLen, 1)
+ c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1")
+ c.Assert(d.path, check.Equals, defaultDiscoveryPath)
+
+ storeMock = &FakeStore{
+ Endpoints: []string{"127.0.0.1:1234"},
+ }
+ d = &Discovery{backend: store.CONSUL}
+ d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
+ d.store = storeMock
+
+ s = d.store.(*FakeStore)
+ c.Assert(s.Endpoints, check.HasLen, 1)
+ c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234")
+ c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath)
+
+ storeMock = &FakeStore{
+ Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"},
+ }
+ d = &Discovery{backend: store.CONSUL}
+ d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil)
+ d.store = storeMock
+
+ s = d.store.(*FakeStore)
+ c.Assert(s.Endpoints, check.HasLen, 3)
+ c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234")
+ c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234")
+ c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234")
+
+ c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath)
+}
+
+// Extremely limited mock store so we can test initialization
+type Mock struct {
+	// Endpoints passed to NewMock
+ Endpoints []string
+
+	// Options passed to NewMock
+ Options *store.Config
+}
+
+func NewMock(endpoints []string, options *store.Config) (store.Store, error) {
+ s := &Mock{}
+ s.Endpoints = endpoints
+ s.Options = options
+ return s, nil
+}
+func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
+ return errors.New("Put not supported")
+}
+func (s *Mock) Get(key string) (*store.KVPair, error) {
+ return nil, errors.New("Get not supported")
+}
+func (s *Mock) Delete(key string) error {
+ return errors.New("Delete not supported")
+}
+
+// Exists mock
+func (s *Mock) Exists(key string) (bool, error) {
+ return false, errors.New("Exists not supported")
+}
+
+// Watch mock
+func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+ return nil, errors.New("Watch not supported")
+}
+
+// WatchTree mock
+func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+ return nil, errors.New("WatchTree not supported")
+}
+
+// NewLock mock
+func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+ return nil, errors.New("NewLock not supported")
+}
+
+// List mock
+func (s *Mock) List(prefix string) ([]*store.KVPair, error) {
+ return nil, errors.New("List not supported")
+}
+
+// DeleteTree mock
+func (s *Mock) DeleteTree(prefix string) error {
+ return errors.New("DeleteTree not supported")
+}
+
+// AtomicPut mock
+func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) {
+ return false, nil, errors.New("AtomicPut not supported")
+}
+
+// AtomicDelete mock
+func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+ return false, errors.New("AtomicDelete not supported")
+}
+
+// Close mock
+func (s *Mock) Close() {
+ return
+}
+
+func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) {
+ cert := `-----BEGIN CERTIFICATE-----
+MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT
+B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD
+VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC
+O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds
++J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q
+V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb
+UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55
+Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT
+V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/
+BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j
+BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz
+7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI
+xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M
+ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY
+8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn
+t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX
+FpTxDmJHEV4bzUzh
+-----END CERTIFICATE-----
+`
+ key := `-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4
++zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR
+SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr
+pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe
+rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj
+xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj
+i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx
+qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO
+1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5
+5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony
+MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0
+ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP
+L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N
+XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT
+Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B
+LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU
+t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+
+QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV
+xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj
+xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc
+qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa
+V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV
+PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk
+dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL
+BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I=
+-----END RSA PRIVATE KEY-----
+`
+ certFile, err := ioutil.TempFile("", "cert")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(certFile.Name())
+ certFile.Write([]byte(cert))
+ certFile.Close()
+ keyFile, err := ioutil.TempFile("", "key")
+ c.Assert(err, check.IsNil)
+ defer os.Remove(keyFile.Name())
+ keyFile.Write([]byte(key))
+ keyFile.Close()
+
+ libkv.AddStore("mock", NewMock)
+ d := &Discovery{backend: "mock"}
+ err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{
+ "kv.cacertfile": certFile.Name(),
+ "kv.certfile": certFile.Name(),
+ "kv.keyfile": keyFile.Name(),
+ })
+ c.Assert(err, check.IsNil)
+ s := d.store.(*Mock)
+ c.Assert(s.Options.TLS, check.NotNil)
+ c.Assert(s.Options.TLS.RootCAs, check.NotNil)
+ c.Assert(s.Options.TLS.Certificates, check.HasLen, 1)
+}
+
+func (ds *DiscoverySuite) TestWatch(c *check.C) {
+ mockCh := make(chan []*store.KVPair)
+
+ storeMock := &FakeStore{
+ Endpoints: []string{"127.0.0.1:1234"},
+ mockKVChan: mockCh,
+ }
+
+ d := &Discovery{backend: store.CONSUL}
+ d.Initialize("127.0.0.1:1234/path", 0, 0, nil)
+ d.store = storeMock
+
+ expected := discovery.Entries{
+ &discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+ &discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+ }
+ kvs := []*store.KVPair{
+ {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")},
+ {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")},
+ }
+
+ stopCh := make(chan struct{})
+ ch, errCh := d.Watch(stopCh)
+
+ // It should fire an error since the first WatchTree call failed.
+ c.Assert(<-errCh, check.ErrorMatches, "test error")
+ // We have to drain the error channel otherwise Watch will get stuck.
+ go func() {
+ for range errCh {
+ }
+ }()
+
+ // Push the entries into the store channel and make sure discovery emits.
+ mockCh <- kvs
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ // Add a new entry.
+ expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"})
+ kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")})
+ mockCh <- kvs
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ close(mockCh)
+ // Give it enough time to call WatchTree.
+ time.Sleep(3 * time.Second)
+
+ // Stop and make sure it closes all channels.
+ close(stopCh)
+ c.Assert(<-ch, check.IsNil)
+ c.Assert(<-errCh, check.IsNil)
+}
+
+// FakeStore implements store.Store methods. It mocks all store
+// function in a simple, naive way.
+type FakeStore struct {
+ Endpoints []string
+ Options *store.Config
+ mockKVChan <-chan []*store.KVPair
+
+ watchTreeCallCount int
+}
+
+func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error {
+ return nil
+}
+
+func (s *FakeStore) Get(key string) (*store.KVPair, error) {
+ return nil, nil
+}
+
+func (s *FakeStore) Delete(key string) error {
+ return nil
+}
+
+func (s *FakeStore) Exists(key string) (bool, error) {
+ return true, nil
+}
+
+func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+ return nil, nil
+}
+
+// WatchTree fails on the first call and returns mockKVChan afterwards.
+// This is the behavior we need for testing; update this if tests need more.
+func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	// Only the first call errors; subsequent calls return the mock channel.
+	if s.watchTreeCallCount == 0 {
+		s.watchTreeCallCount = 1
+		return nil, errors.New("test error")
+	}
+	return s.mockKVChan, nil
+}
+}
+
+func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+ return nil, nil
+}
+
+func (s *FakeStore) List(directory string) ([]*store.KVPair, error) {
+ return []*store.KVPair{}, nil
+}
+
+func (s *FakeStore) DeleteTree(directory string) error {
+ return nil
+}
+
+func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
+ return true, nil, nil
+}
+
+func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+ return true, nil
+}
+
+func (s *FakeStore) Close() {
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go
new file mode 100644
index 00000000000..ba8b1f55f3e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go
@@ -0,0 +1,93 @@
+package memory
+
+import (
+ "sync"
+ "time"
+
+ "github.com/docker/docker/pkg/discovery"
+)
+
+// Discovery implements a discovery backend that keeps
+// data in memory.
+type Discovery struct {
+ heartbeat time.Duration
+ values []string
+ mu sync.Mutex
+}
+
+func init() {
+ Init()
+}
+
+// Init registers the memory backend on demand.
+func Init() {
+ discovery.Register("memory", &Discovery{})
+}
+
+// Initialize sets the heartbeat for the memory backend.
+func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error {
+ s.heartbeat = heartbeat
+ s.values = make([]string, 0)
+ return nil
+}
+
+// Watch sends periodic discovery updates to a channel.
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+ ch := make(chan discovery.Entries)
+ errCh := make(chan error)
+ ticker := time.NewTicker(s.heartbeat)
+
+ go func() {
+ defer close(errCh)
+ defer close(ch)
+
+ // Send the initial entries if available.
+ var currentEntries discovery.Entries
+ var err error
+
+ s.mu.Lock()
+ if len(s.values) > 0 {
+ currentEntries, err = discovery.CreateEntries(s.values)
+ }
+ s.mu.Unlock()
+
+ if err != nil {
+ errCh <- err
+ } else if currentEntries != nil {
+ ch <- currentEntries
+ }
+
+ // Periodically send updates.
+ for {
+ select {
+ case <-ticker.C:
+ s.mu.Lock()
+ newEntries, err := discovery.CreateEntries(s.values)
+ s.mu.Unlock()
+ if err != nil {
+ errCh <- err
+ continue
+ }
+
+				// Only send updates when the entries have actually changed.
+ if !newEntries.Equals(currentEntries) {
+ ch <- newEntries
+ }
+ currentEntries = newEntries
+ case <-stopCh:
+ ticker.Stop()
+ return
+ }
+ }
+ }()
+
+ return ch, errCh
+}
+
+// Register adds a new address to the discovery.
+func (s *Discovery) Register(addr string) error {
+ s.mu.Lock()
+ s.values = append(s.values, addr)
+ s.mu.Unlock()
+ return nil
+}
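
A sketch of the memory backend, which is mainly useful in tests; registrations surface on the next heartbeat tick:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/discovery/memory"
)

func main() {
	// The memory backend ignores the URI argument.
	d := &memory.Discovery{}
	d.Initialize("ignored", 100*time.Millisecond, 0, nil)

	stopCh := make(chan struct{})
	ch, _ := d.Watch(stopCh)

	// The registration is picked up on the next tick of the heartbeat.
	d.Register("1.1.1.1:1111")
	fmt.Println(<-ch) // [1.1.1.1:1111]
	close(stopCh)
}
```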
diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go
new file mode 100644
index 00000000000..c2da0a068e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/memory/memory_test.go
@@ -0,0 +1,48 @@
+package memory
+
+import (
+ "testing"
+
+ "github.com/docker/docker/pkg/discovery"
+ "github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type discoverySuite struct{}
+
+var _ = check.Suite(&discoverySuite{})
+
+func (s *discoverySuite) TestWatch(c *check.C) {
+ d := &Discovery{}
+ d.Initialize("foo", 1000, 0, nil)
+ stopCh := make(chan struct{})
+ ch, errCh := d.Watch(stopCh)
+
+ // We have to drain the error channel otherwise Watch will get stuck.
+ go func() {
+ for range errCh {
+ }
+ }()
+
+ expected := discovery.Entries{
+ &discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+ }
+
+ c.Assert(d.Register("1.1.1.1:1111"), check.IsNil)
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ expected = discovery.Entries{
+ &discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+ &discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+ }
+
+ c.Assert(d.Register("2.2.2.2:2222"), check.IsNil)
+ c.Assert(<-ch, check.DeepEquals, expected)
+
+ // Stop and make sure it closes all channels.
+ close(stopCh)
+ c.Assert(<-ch, check.IsNil)
+ c.Assert(<-errCh, check.IsNil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go
new file mode 100644
index 00000000000..c0e3c07b224
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go
@@ -0,0 +1,54 @@
+package nodes
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/discovery"
+)
+
+// Discovery is exported
+type Discovery struct {
+ entries discovery.Entries
+}
+
+func init() {
+ Init()
+}
+
+// Init is exported
+func Init() {
+ discovery.Register("nodes", &Discovery{})
+}
+
+// Initialize is exported
+func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error {
+ for _, input := range strings.Split(uris, ",") {
+ for _, ip := range discovery.Generate(input) {
+ entry, err := discovery.NewEntry(ip)
+ if err != nil {
+ return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error())
+ }
+ s.entries = append(s.entries, entry)
+ }
+ }
+
+ return nil
+}
+
+// Watch is exported
+func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
+ ch := make(chan discovery.Entries)
+ go func() {
+ defer close(ch)
+ ch <- s.entries
+ <-stopCh
+ }()
+ return ch, nil
+}
+
+// Register is exported
+func (s *Discovery) Register(addr string) error {
+ return discovery.ErrNotImplemented
+}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go
new file mode 100644
index 00000000000..e26568cf54f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes_test.go
@@ -0,0 +1,51 @@
+package nodes
+
+import (
+ "testing"
+
+ "github.com/docker/docker/pkg/discovery"
+
+ "github.com/go-check/check"
+)
+
+// Hook up gocheck into the "go test" runner.
+func Test(t *testing.T) { check.TestingT(t) }
+
+type DiscoverySuite struct{}
+
+var _ = check.Suite(&DiscoverySuite{})
+
+func (s *DiscoverySuite) TestInitialize(c *check.C) {
+ d := &Discovery{}
+ d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
+ c.Assert(len(d.entries), check.Equals, 2)
+ c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111")
+ c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222")
+}
+
+func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) {
+ d := &Discovery{}
+ d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil)
+ c.Assert(len(d.entries), check.Equals, 5)
+ c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111")
+ c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111")
+ c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222")
+ c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222")
+ c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222")
+}
+
+func (s *DiscoverySuite) TestWatch(c *check.C) {
+ d := &Discovery{}
+ d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil)
+ expected := discovery.Entries{
+ &discovery.Entry{Host: "1.1.1.1", Port: "1111"},
+ &discovery.Entry{Host: "2.2.2.2", Port: "2222"},
+ }
+ ch, _ := d.Watch(nil)
+ c.Assert(expected.Equals(<-ch), check.Equals, true)
+}
+
+func (s *DiscoverySuite) TestRegister(c *check.C) {
+ d := &Discovery{}
+ c.Assert(d.Register("0.0.0.0"), check.NotNil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go
new file mode 100644
index 00000000000..23befae6789
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go
@@ -0,0 +1,40 @@
+// Package filenotify provides a mechanism for watching file(s) for changes.
+// It generally leans on fsnotify, but adds a poll-based notifier for cases fsnotify cannot handle.
+// Both are wrapped in a common interface so that either can be used interchangeably in your code.
+package filenotify
+
+import "gopkg.in/fsnotify.v1"
+
+// FileWatcher is an interface for implementing file notification watchers
+type FileWatcher interface {
+ Events() <-chan fsnotify.Event
+ Errors() <-chan error
+ Add(name string) error
+ Remove(name string) error
+ Close() error
+}
+
+// New tries to use an fs-event watcher, and falls back to the poller if there is an error
+func New() (FileWatcher, error) {
+ if watcher, err := NewEventWatcher(); err == nil {
+ return watcher, nil
+ }
+ return NewPollingWatcher(), nil
+}
+
+// NewPollingWatcher returns a poll-based file watcher
+func NewPollingWatcher() FileWatcher {
+ return &filePoller{
+ events: make(chan fsnotify.Event),
+ errors: make(chan error),
+ }
+}
+
+// NewEventWatcher returns an fs-event based file watcher
+func NewEventWatcher() (FileWatcher, error) {
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ return nil, err
+ }
+ return &fsNotifyWatcher{watcher}, nil
+}
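
A minimal sketch of the common interface; the watched path is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/filenotify"
)

func main() {
	// Prefers an fsnotify-based watcher and silently falls back to polling
	// (e.g. when inotify watch handles are exhausted).
	w, err := filenotify.New()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/etc/hosts"); err != nil {
		log.Fatal(err)
	}

	// Block until the file changes or the watch errors.
	select {
	case ev := <-w.Events():
		fmt.Println("event:", ev)
	case err := <-w.Errors():
		fmt.Println("watch error:", err)
	}
}
```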
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go
new file mode 100644
index 00000000000..dba2fc37567
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go
@@ -0,0 +1,18 @@
+package filenotify
+
+import "gopkg.in/fsnotify.v1"
+
+// fsNotifyWatcher wraps the fsnotify package to satisfy the FileWatcher interface
+type fsNotifyWatcher struct {
+ *fsnotify.Watcher
+}
+
+// Events returns the fsnotify event channel receiver
+func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event {
+ return w.Watcher.Events
+}
+
+// Errors returns the fsnotify error channel receiver
+func (w *fsNotifyWatcher) Errors() <-chan error {
+ return w.Watcher.Errors
+}
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go
new file mode 100644
index 00000000000..5261085346f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go
@@ -0,0 +1,204 @@
+package filenotify
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+
+ "gopkg.in/fsnotify.v1"
+)
+
+var (
+ // errPollerClosed is returned when the poller is closed
+ errPollerClosed = errors.New("poller is closed")
+	// errNoSuchWatch is returned when trying to remove a watch that doesn't exist
+ errNoSuchWatch = errors.New("poller does not exist")
+)
+
+// watchWaitTime is the time to wait between file poll loops
+const watchWaitTime = 200 * time.Millisecond
+
+// filePoller is used to poll files for changes, especially in cases where fsnotify
+// can't be run (e.g. when inotify handles are exhausted)
+// filePoller satisfies the FileWatcher interface
+type filePoller struct {
+	// watches is the list of files currently being polled; close the associated channel to stop a watch
+ watches map[string]chan struct{}
+ // events is the channel to listen to for watch events
+ events chan fsnotify.Event
+ // errors is the channel to listen to for watch errors
+ errors chan error
+ // mu locks the poller for modification
+ mu sync.Mutex
+ // closed is used to specify when the poller has already closed
+ closed bool
+}
+
+// Add adds a filename to the list of watches
+// once added the file is polled for changes in a separate goroutine
+func (w *filePoller) Add(name string) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+	if w.closed {
+ return errPollerClosed
+ }
+
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ fi, err := os.Stat(name)
+ if err != nil {
+ return err
+ }
+
+ if w.watches == nil {
+ w.watches = make(map[string]chan struct{})
+ }
+ if _, exists := w.watches[name]; exists {
+ return fmt.Errorf("watch exists")
+ }
+ chClose := make(chan struct{})
+ w.watches[name] = chClose
+
+ go w.watch(f, fi, chClose)
+ return nil
+}
+
+// Remove stops and removes watch with the specified name
+func (w *filePoller) Remove(name string) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.remove(name)
+}
+
+func (w *filePoller) remove(name string) error {
+	if w.closed {
+ return errPollerClosed
+ }
+
+ chClose, exists := w.watches[name]
+ if !exists {
+ return errNoSuchWatch
+ }
+ close(chClose)
+ delete(w.watches, name)
+ return nil
+}
+
+// Events returns the event channel
+// This is used for notifications on events about watched files
+func (w *filePoller) Events() <-chan fsnotify.Event {
+ return w.events
+}
+
+// Errors returns the errors channel
+// This is used for notifications about errors on watched files
+func (w *filePoller) Errors() <-chan error {
+ return w.errors
+}
+
+// Close closes the poller
+// All watches are stopped and removed, and no new watches can be added
+func (w *filePoller) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.closed {
+ return nil
+ }
+
+ w.closed = true
+ for name := range w.watches {
+ w.remove(name)
+ delete(w.watches, name)
+ }
+ return nil
+}
+
+// sendEvent publishes the specified event to the events channel
+func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error {
+ select {
+ case w.events <- e:
+ case <-chClose:
+ return fmt.Errorf("closed")
+ }
+ return nil
+}
+
+// sendErr publishes the specified error to the errors channel
+func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error {
+ select {
+ case w.errors <- e:
+ case <-chClose:
+ return fmt.Errorf("closed")
+ }
+ return nil
+}
+
+// watch is responsible for polling the specified file for changes
+// upon finding changes to a file or errors, sendEvent/sendErr is called
+func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) {
+ defer f.Close()
+ for {
+ time.Sleep(watchWaitTime)
+ select {
+ case <-chClose:
+ logrus.Debugf("watch for %s closed", f.Name())
+ return
+ default:
+ }
+
+ fi, err := os.Stat(f.Name())
+ if err != nil {
+ // if we got an error here and lastFi is not set, we can presume that nothing has changed
+			// This should be safe since a stat is performed before `watch()` is called; if that stat errors, `watch` is never called
+ if lastFi == nil {
+ continue
+ }
+ // If it doesn't exist at this point, it must have been removed
+ // no need to send the error here since this is a valid operation
+ if os.IsNotExist(err) {
+ if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil {
+ return
+ }
+ lastFi = nil
+ continue
+ }
+ // at this point, send the error
+ if err := w.sendErr(err, chClose); err != nil {
+ return
+ }
+ continue
+ }
+
+ if lastFi == nil {
+ if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil {
+ return
+ }
+ lastFi = fi
+ continue
+ }
+
+ if fi.Mode() != lastFi.Mode() {
+ if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil {
+ return
+ }
+ lastFi = fi
+ continue
+ }
+
+ if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() {
+ if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil {
+ return
+ }
+ lastFi = fi
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go
new file mode 100644
index 00000000000..4f5026237c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/filenotify/poller_test.go
@@ -0,0 +1,119 @@
+package filenotify
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+
+ "gopkg.in/fsnotify.v1"
+)
+
+func TestPollerAddRemove(t *testing.T) {
+ w := NewPollingWatcher()
+
+ if err := w.Add("no-such-file"); err == nil {
+ t.Fatal("should have gotten error when adding a non-existent file")
+ }
+ if err := w.Remove("no-such-file"); err == nil {
+ t.Fatal("should have gotten error when removing non-existent watch")
+ }
+
+ f, err := ioutil.TempFile("", "asdf")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(f.Name())
+
+ if err := w.Add(f.Name()); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := w.Remove(f.Name()); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPollerEvent(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("No chmod on Windows")
+ }
+ w := NewPollingWatcher()
+
+ f, err := ioutil.TempFile("", "test-poller")
+ if err != nil {
+ t.Fatal("error creating temp file")
+ }
+ defer os.RemoveAll(f.Name())
+ f.Close()
+
+ if err := w.Add(f.Name()); err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case <-w.Events():
+ t.Fatal("got event before anything happened")
+ case <-w.Errors():
+ t.Fatal("got error before anything happened")
+ default:
+ }
+
+	if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ if err := assertEvent(w, fsnotify.Write); err != nil {
+ t.Fatal(err)
+ }
+
+	if err := os.Chmod(f.Name(), 0600); err != nil {
+ t.Fatal(err)
+ }
+ if err := assertEvent(w, fsnotify.Chmod); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := os.Remove(f.Name()); err != nil {
+ t.Fatal(err)
+ }
+ if err := assertEvent(w, fsnotify.Remove); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestPollerClose(t *testing.T) {
+ w := NewPollingWatcher()
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+ // test double-close
+ if err := w.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ f, err := ioutil.TempFile("", "asdf")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(f.Name())
+ if err := w.Add(f.Name()); err == nil {
+ t.Fatal("should have gotten error adding watch for closed watcher")
+ }
+}
+
+func assertEvent(w FileWatcher, eType fsnotify.Op) error {
+ var err error
+ select {
+ case e := <-w.Events():
+ if e.Op != eType {
+ err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e)
+ }
+ case e := <-w.Errors():
+ err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e)
+ case <-time.After(watchWaitTime * 3):
+ err = fmt.Errorf("timeout waiting for event %v", eType)
+ }
+ return err
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
similarity index 54%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
rename to vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
index 1b8cadc63ff..c00a0cdee32 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
@@ -4,20 +4,21 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
+ "regexp"
"strings"
+ "text/scanner"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
)
-// exclusion return true if the specified pattern is an exclusion
+// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
-// empty return true if the specified pattern is empty
+// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
@@ -30,7 +31,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
- // 3. Error if we see a single exclusion marker on it's own (!).
+ // 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
@@ -51,7 +52,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
if exclusion(pattern) {
pattern = pattern[1:]
}
- patternDirs = append(patternDirs, strings.Split(pattern, "/"))
+ patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}
return cleanedPatterns, patternDirs, exceptions, nil
@@ -77,13 +78,14 @@ func Matches(file string, patterns []string) (bool, error) {
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
-// doen't need to do as much error checking and clean-up. This was done to avoid
+// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
+ file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, "/")
+ parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for i, pattern := range patterns {
negative := false
@@ -93,16 +95,16 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
pattern = pattern[1:]
}
- match, err := filepath.Match(pattern, file)
+ match, err := regexpMatch(pattern, file)
if err != nil {
- return false, err
+ return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
- match, _ = filepath.Match(strings.Join(patDirs[i], "/"),
- strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
+ match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
+ strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}
@@ -118,8 +120,104 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
return matched, nil
}
+// regexpMatch tries to match the logic of filepath.Match but
+// does so using regexp logic. We do this so that we can expand the
+// wildcard set to include other things, like "**" to mean any number
+// of directories. This means that we should be backwards compatible
+// with filepath.Match(). We'll end up supporting more than it does,
+// since we're using regexp, but that's OK - it does no harm.
+//
+// As per the comment in Go's filepath.Match, on Windows escaping
+// is disabled; instead, '\\' is treated as the path separator.
+func regexpMatch(pattern, path string) (bool, error) {
+ regStr := "^"
+
+ // Do some syntax checking on the pattern.
+	// filepath's Match() has some inconsistent, hard-to-reproduce rules,
+	// so instead of duplicating its logic we just call Match() for its
+	// error state, and if the pattern is malformed we return that error.
+	// If this becomes an issue we can remove it, since it's really only
+	// needed for the error (syntax) case, which isn't critical.
+ if _, err := filepath.Match(pattern, path); err != nil {
+ return false, err
+ }
+
+ // Go through the pattern and convert it to a regexp.
+ // We use a scanner so we can support utf-8 chars.
+ var scan scanner.Scanner
+ scan.Init(strings.NewReader(pattern))
+
+ sl := string(os.PathSeparator)
+ escSL := sl
+ if sl == `\` {
+ escSL += `\`
+ }
+
+ for scan.Peek() != scanner.EOF {
+ ch := scan.Next()
+
+ if ch == '*' {
+ if scan.Peek() == '*' {
+ // is some flavor of "**"
+ scan.Next()
+
+ if scan.Peek() == scanner.EOF {
+ // is "**EOF" - to align with .gitignore just accept all
+ regStr += ".*"
+ } else {
+ // is "**"
+ regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
+ }
+
+ // Treat **/ as ** so eat the "/"
+ if string(scan.Peek()) == sl {
+ scan.Next()
+ }
+ } else {
+ // is "*" so map it to anything but "/"
+ regStr += "[^" + escSL + "]*"
+ }
+ } else if ch == '?' {
+ // "?" is any char except "/"
+ regStr += "[^" + escSL + "]"
+ } else if strings.Index(".$", string(ch)) != -1 {
+ // Escape some regexp special chars that have no meaning
+ // in golang's filepath.Match
+ regStr += `\` + string(ch)
+		} else if ch == '\\' {
+			// Escape the next char. Note that a trailing \ in the pattern
+			// will be left alone (but we need to escape it).
+ if sl == `\` {
+ // On windows map "\" to "\\", meaning an escaped backslash,
+ // and then just continue because filepath.Match on
+ // Windows doesn't allow escaping at all
+ regStr += escSL
+ continue
+ }
+ if scan.Peek() != scanner.EOF {
+ regStr += `\` + string(scan.Next())
+ } else {
+ regStr += `\`
+ }
+ } else {
+ regStr += string(ch)
+ }
+ }
+
+ regStr += "$"
+
+ res, err := regexp.MatchString(regStr, path)
+
+ // Map regexp's error to filepath's so no one knows we're not using filepath
+ if err != nil {
+ err = filepath.ErrBadPattern
+ }
+
+ return res, err
+}
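+
+// Illustrative sketch (not part of the original file): on a Unix host the
+// loop above turns a pattern into an anchored regexp, e.g.
+//
+//	regexpMatch("docs/**/*.md", "docs/a/b/c.md") // true
+//	// regStr built here: ^docs/((.*/)|([^/]*))[^/]*\.md$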
+
// CopyFile copies from src to dst until either EOF is reached
-// on src or an error occurs. It verifies src exists and remove
+// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
@@ -143,17 +241,6 @@ func CopyFile(src, dst string) (int64, error) {
return io.Copy(df, sf)
}
-// GetTotalUsedFds Returns the number of used File Descriptors by
-// reading it via /proc filesystem.
-func GetTotalUsedFds() int {
- if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
- logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
- } else {
- return len(fds)
- }
- return -1
-}
-
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
new file mode 100644
index 00000000000..ccd648fac30
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
@@ -0,0 +1,27 @@
+package fileutils
+
+import (
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+// GetTotalUsedFds returns the number of used file descriptors by
+// executing `lsof -p PID`.
+func GetTotalUsedFds() int {
+ pid := os.Getpid()
+
+ cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return -1
+ }
+
+ outputStr := strings.TrimSpace(string(output))
+
+ fds := strings.Split(outputStr, "\n")
+
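+	// lsof prints a header line, so the count of open descriptors is the
+	// number of output lines minus one.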
+ return len(fds) - 1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
new file mode 100644
index 00000000000..0f2cb7ab933
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors.
+// On Solaris these limits are per-process rather than system-wide,
+// so this is not implemented and always returns -1.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go
new file mode 100644
index 00000000000..6df1be89bbb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_test.go
@@ -0,0 +1,585 @@
+package fileutils
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// CopyFile with invalid src
+func TestCopyFileWithInvalidSrc(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest"))
+ if err == nil {
+		t.Fatal("Should have failed to copy an invalid src file")
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes")
+ }
+
+}
+
+// CopyFile with invalid dest
+func TestCopyFileWithInvalidDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ src := path.Join(tempFolder, "file")
+ err = ioutil.WriteFile(src, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path"))
+ if err == nil {
+		t.Fatal("Should have failed to copy to an invalid dest path")
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes")
+ }
+
+}
+
+// CopyFile with same src and dest
+func TestCopyFileWithSameSrcAndDest(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ file := path.Join(tempFolder, "file")
+ err = ioutil.WriteFile(file, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(file, file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes as it is the same file.")
+ }
+}
+
+// CopyFile with same src and dest but path is different and not clean
+func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ testFolder := path.Join(tempFolder, "test")
+ err = os.MkdirAll(testFolder, 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ file := path.Join(testFolder, "file")
+ sameFile := testFolder + "/../test/file"
+ err = ioutil.WriteFile(file, []byte("content"), 0740)
+ if err != nil {
+ t.Fatal(err)
+ }
+ bytes, err := CopyFile(file, sameFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 0 {
+ t.Fatal("Should have written 0 bytes as it is the same file.")
+ }
+}
+
+func TestCopyFile(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ defer os.RemoveAll(tempFolder)
+ if err != nil {
+ t.Fatal(err)
+ }
+ src := path.Join(tempFolder, "src")
+ dest := path.Join(tempFolder, "dest")
+ ioutil.WriteFile(src, []byte("content"), 0777)
+ ioutil.WriteFile(dest, []byte("destContent"), 0777)
+ bytes, err := CopyFile(src, dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if bytes != 7 {
+ t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes)
+ }
+ actual, err := ioutil.ReadFile(dest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(actual) != "content" {
+ t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content")
+ }
+}
+
+// Reading a symlink to a directory must return the directory
+func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ var err error
+ if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil {
+ t.Errorf("failed to create directory: %s", err)
+ }
+
+ if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil {
+ t.Errorf("failed to create symlink: %s", err)
+ }
+
+ var path string
+ if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil {
+ t.Fatalf("failed to read symlink to directory: %s", err)
+ }
+
+ if path != "/tmp/testReadSymlinkToExistingDirectory" {
+ t.Fatalf("symlink returned unexpected directory: %s", path)
+ }
+
+ if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil {
+ t.Errorf("failed to remove temporary directory: %s", err)
+ }
+
+ if err = os.Remove("/tmp/dirLinkTest"); err != nil {
+ t.Errorf("failed to remove symlink: %s", err)
+ }
+}
+
+// Reading a non-existing symlink must fail
+func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) {
+ var path string
+ var err error
+ if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil {
+ t.Fatalf("error expected for non-existing symlink")
+ }
+
+ if path != "" {
+ t.Fatalf("expected empty path, but '%s' was returned", path)
+ }
+}
+
+// Reading a symlink to a file must fail
+func TestReadSymlinkedDirectoryToFile(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ var err error
+ var file *os.File
+
+ if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil {
+ t.Fatalf("failed to create file: %s", err)
+ }
+
+ file.Close()
+
+ if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil {
+ t.Errorf("failed to create symlink: %s", err)
+ }
+
+ var path string
+ if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil {
+ t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed")
+ }
+
+ if path != "" {
+ t.Fatalf("path should've been empty: %s", path)
+ }
+
+ if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil {
+ t.Errorf("failed to remove file: %s", err)
+ }
+
+ if err = os.Remove("/tmp/fileLinkTest"); err != nil {
+ t.Errorf("failed to remove symlink: %s", err)
+ }
+}
+
+func TestWildcardMatches(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*"})
+ if match != true {
+ t.Errorf("failed to get a wildcard match, got %v", match)
+ }
+}
+
+// A simple pattern match should return true.
+func TestPatternMatches(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*.go"})
+ if match != true {
+ t.Errorf("failed to get a match, got %v", match)
+ }
+}
+
+// An exclusion followed by an inclusion should return true.
+func TestExclusionPatternMatchesPatternBefore(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"})
+ if match != true {
+ t.Errorf("failed to get true match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A folder pattern followed by an exception should return false.
+func TestPatternMatchesFolderWildcardExclusions(t *testing.T) {
+ match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"})
+ if match != false {
+ t.Errorf("failed to get a false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A pattern followed by an exclusion should return false.
+func TestExclusionPatternMatchesPatternAfter(t *testing.T) {
+ match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
+ if match != false {
+ t.Errorf("failed to get false match on exclusion pattern, got %v", match)
+ }
+}
+
+// A filename evaluating to . should return false.
+func TestExclusionPatternMatchesWholeDirectory(t *testing.T) {
+ match, _ := Matches(".", []string{"*.go"})
+ if match != false {
+ t.Errorf("failed to get false match on ., got %v", match)
+ }
+}
+
+// A single ! pattern should return an error.
+func TestSingleExclamationError(t *testing.T) {
+ _, err := Matches("fileutils.go", []string{"!"})
+ if err == nil {
+ t.Errorf("failed to get an error for a single exclamation point, got %v", err)
+ }
+}
+
+// A string preceded with a ! should return true from Exclusion.
+func TestExclusion(t *testing.T) {
+ exclusion := exclusion("!")
+ if !exclusion {
+ t.Errorf("failed to get true for a single !, got %v", exclusion)
+ }
+}
+
+// Matches with no patterns
+func TestMatchesWithNoPatterns(t *testing.T) {
+ matches, err := Matches("/any/path/there", []string{})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if matches {
+		t.Fatalf("Should not have matched anything")
+ }
+}
+
+// Matches with malformed patterns
+func TestMatchesWithMalformedPatterns(t *testing.T) {
+ matches, err := Matches("/any/path/there", []string{"["})
+ if err == nil {
+ t.Fatal("Should have failed because of a malformed syntax in the pattern")
+ }
+ if matches {
+		t.Fatalf("Should not have matched anything")
+ }
+}
+
+// Test lots of variants of patterns & strings
+func TestMatches(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ tests := []struct {
+ pattern string
+ text string
+ pass bool
+ }{
+ {"**", "file", true},
+ {"**", "file/", true},
+ {"**/", "file", true}, // weird one
+ {"**/", "file/", true},
+ {"**", "/", true},
+ {"**/", "/", true},
+ {"**", "dir/file", true},
+ {"**/", "dir/file", false},
+ {"**", "dir/file/", true},
+ {"**/", "dir/file/", true},
+ {"**/**", "dir/file", true},
+ {"**/**", "dir/file/", true},
+ {"dir/**", "dir/file", true},
+ {"dir/**", "dir/file/", true},
+ {"dir/**", "dir/dir2/file", true},
+ {"dir/**", "dir/dir2/file/", true},
+ {"**/dir2/*", "dir/dir2/file", true},
+ {"**/dir2/*", "dir/dir2/file/", false},
+ {"**/dir2/**", "dir/dir2/dir3/file", true},
+ {"**/dir2/**", "dir/dir2/dir3/file/", true},
+ {"**file", "file", true},
+ {"**file", "dir/file", true},
+ {"**/file", "dir/file", true},
+ {"**file", "dir/dir/file", true},
+ {"**/file", "dir/dir/file", true},
+ {"**/file*", "dir/dir/file", true},
+ {"**/file*", "dir/dir/file.txt", true},
+ {"**/file*txt", "dir/dir/file.txt", true},
+ {"**/file*.txt", "dir/dir/file.txt", true},
+ {"**/file*.txt*", "dir/dir/file.txt", true},
+ {"**/**/*.txt", "dir/dir/file.txt", true},
+ {"**/**/*.txt2", "dir/dir/file.txt", false},
+ {"**/*.txt", "file.txt", true},
+ {"**/**/*.txt", "file.txt", true},
+ {"a**/*.txt", "a/file.txt", true},
+ {"a**/*.txt", "a/dir/file.txt", true},
+ {"a**/*.txt", "a/dir/dir/file.txt", true},
+ {"a/*.txt", "a/dir/file.txt", false},
+ {"a/*.txt", "a/file.txt", true},
+ {"a/*.txt**", "a/file.txt", true},
+ {"a[b-d]e", "ae", false},
+ {"a[b-d]e", "ace", true},
+ {"a[b-d]e", "aae", false},
+ {"a[^b-d]e", "aze", true},
+ {".*", ".foo", true},
+ {".*", "foo", false},
+ {"abc.def", "abcdef", false},
+ {"abc.def", "abc.def", true},
+ {"abc.def", "abcZdef", false},
+ {"abc?def", "abcZdef", true},
+ {"abc?def", "abcdef", false},
+ {"a\\*b", "a*b", true},
+ {"a\\", "a", false},
+ {"a\\", "a\\", false},
+ {"a\\\\", "a\\", true},
+ {"**/foo/bar", "foo/bar", true},
+ {"**/foo/bar", "dir/foo/bar", true},
+ {"**/foo/bar", "dir/dir2/foo/bar", true},
+ {"abc/**", "abc", false},
+ {"abc/**", "abc/def", true},
+ {"abc/**", "abc/def/ghi", true},
+ }
+
+ for _, test := range tests {
+ res, _ := regexpMatch(test.pattern, test.text)
+ if res != test.pass {
+ t.Fatalf("Failed: %v - res:%v", test, res)
+ }
+ }
+}
+
+// An empty string should return true from Empty.
+func TestEmpty(t *testing.T) {
+ empty := empty("")
+ if !empty {
+ t.Errorf("failed to get true for an empty string, got %v", empty)
+ }
+}
+
+func TestCleanPatterns(t *testing.T) {
+ cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"})
+ if len(cleaned) != 2 {
+ t.Errorf("expected 2 element slice, got %v", len(cleaned))
+ }
+}
+
+func TestCleanPatternsStripEmptyPatterns(t *testing.T) {
+ cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""})
+ if len(cleaned) != 2 {
+ t.Errorf("expected 2 element slice, got %v", len(cleaned))
+ }
+}
+
+func TestCleanPatternsExceptionFlag(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) {
+ _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "})
+ if !exceptions {
+ t.Errorf("expected exceptions to be true, got %v", exceptions)
+ }
+}
+
+func TestCleanPatternsErrorSingleException(t *testing.T) {
+ _, _, _, err := CleanPatterns([]string{"!"})
+ if err == nil {
+ t.Errorf("expected error on single exclamation point, got %v", err)
+ }
+}
+
+func TestCleanPatternsFolderSplit(t *testing.T) {
+ _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"})
+	if dirs[0][0] != "docs" {
+		t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][0])
+	}
+	if dirs[0][1] != "config" {
+		t.Errorf("expected second element in dirs slice to be config, got %v", dirs[0][1])
+ }
+}
+
+func TestCreateIfNotExistsDir(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ folderToCreate := filepath.Join(tempFolder, "tocreate")
+
+ if err := CreateIfNotExists(folderToCreate, true); err != nil {
+ t.Fatal(err)
+ }
+ fileinfo, err := os.Stat(folderToCreate)
+ if err != nil {
+		t.Fatalf("Should have created a folder, got %v", err)
+ }
+
+ if !fileinfo.IsDir() {
+ t.Fatalf("Should have been a dir, seems it's not")
+ }
+}
+
+func TestCreateIfNotExistsFile(t *testing.T) {
+ tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tempFolder)
+
+ fileToCreate := filepath.Join(tempFolder, "file/to/create")
+
+ if err := CreateIfNotExists(fileToCreate, false); err != nil {
+ t.Fatal(err)
+ }
+ fileinfo, err := os.Stat(fileToCreate)
+ if err != nil {
+		t.Fatalf("Should have created a file, got %v", err)
+ }
+
+ if fileinfo.IsDir() {
+ t.Fatalf("Should have been a file, seems it's not")
+ }
+}
+
+// These matchTests are stolen from Go's filepath Match tests.
+type matchTest struct {
+ pattern, s string
+ match bool
+ err error
+}
+
+var matchTests = []matchTest{
+ {"abc", "abc", true, nil},
+ {"*", "abc", true, nil},
+ {"*c", "abc", true, nil},
+ {"a*", "a", true, nil},
+ {"a*", "abc", true, nil},
+ {"a*", "ab/c", false, nil},
+ {"a*/b", "abc/b", true, nil},
+ {"a*/b", "a/c/b", false, nil},
+ {"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
+ {"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
+ {"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
+ {"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
+ {"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
+ {"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
+ {"ab[c]", "abc", true, nil},
+ {"ab[b-d]", "abc", true, nil},
+ {"ab[e-g]", "abc", false, nil},
+ {"ab[^c]", "abc", false, nil},
+ {"ab[^b-d]", "abc", false, nil},
+ {"ab[^e-g]", "abc", true, nil},
+ {"a\\*b", "a*b", true, nil},
+ {"a\\*b", "ab", false, nil},
+ {"a?b", "a☺b", true, nil},
+ {"a[^a]b", "a☺b", true, nil},
+ {"a???b", "a☺b", false, nil},
+ {"a[^a][^a][^a]b", "a☺b", false, nil},
+ {"[a-ζ]*", "α", true, nil},
+ {"*[a-ζ]", "A", false, nil},
+ {"a?b", "a/b", false, nil},
+ {"a*b", "a/b", false, nil},
+ {"[\\]a]", "]", true, nil},
+ {"[\\-]", "-", true, nil},
+ {"[x\\-]", "x", true, nil},
+ {"[x\\-]", "-", true, nil},
+ {"[x\\-]", "z", false, nil},
+ {"[\\-x]", "x", true, nil},
+ {"[\\-x]", "-", true, nil},
+ {"[\\-x]", "a", false, nil},
+ {"[]a]", "]", false, filepath.ErrBadPattern},
+ {"[-]", "-", false, filepath.ErrBadPattern},
+ {"[x-]", "x", false, filepath.ErrBadPattern},
+ {"[x-]", "-", false, filepath.ErrBadPattern},
+ {"[x-]", "z", false, filepath.ErrBadPattern},
+ {"[-x]", "x", false, filepath.ErrBadPattern},
+ {"[-x]", "-", false, filepath.ErrBadPattern},
+ {"[-x]", "a", false, filepath.ErrBadPattern},
+ {"\\", "a", false, filepath.ErrBadPattern},
+ {"[a-b-c]", "a", false, filepath.ErrBadPattern},
+ {"[", "a", false, filepath.ErrBadPattern},
+ {"[^", "a", false, filepath.ErrBadPattern},
+ {"[^bc", "a", false, filepath.ErrBadPattern},
+	{"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO it's wrong
+ {"a[", "ab", false, filepath.ErrBadPattern},
+ {"*x", "xxx", true, nil},
+}
+
+func errp(e error) string {
+ if e == nil {
+ return ""
+ }
+ return e.Error()
+}
+
+// TestMatch tests our version of filepath.Match, called regexpMatch.
+func TestMatch(t *testing.T) {
+ for _, tt := range matchTests {
+ pattern := tt.pattern
+ s := tt.s
+ if runtime.GOOS == "windows" {
+ if strings.Index(pattern, "\\") >= 0 {
+ // no escape allowed on windows.
+ continue
+ }
+ pattern = filepath.Clean(pattern)
+ s = filepath.Clean(s)
+ }
+ ok, err := regexpMatch(pattern, s)
+ if ok != tt.match || err != tt.err {
+ t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err))
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
new file mode 100644
index 00000000000..d5c3abf5688
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
@@ -0,0 +1,22 @@
+// +build linux freebsd
+
+package fileutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// GetTotalUsedFds returns the number of used file descriptors by
+// reading them via the /proc filesystem.
+func GetTotalUsedFds() int {
+ if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
+ logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
+ } else {
+ return len(fds)
+ }
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
new file mode 100644
index 00000000000..5ec21cace52
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
@@ -0,0 +1,7 @@
+package fileutils
+
+// GetTotalUsedFds returns the number of used file descriptors. Not supported
+// on Windows.
+func GetTotalUsedFds() int {
+ return -1
+}
diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go
new file mode 100644
index 00000000000..ded091f2a25
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils.go
@@ -0,0 +1,100 @@
+package gitutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/symlink"
+ "github.com/docker/docker/pkg/urlutil"
+)
+
+// Clone clones a repository into a newly created directory which
+// will be under "docker-build-git"
+func Clone(remoteURL string) (string, error) {
+ if !urlutil.IsGitTransport(remoteURL) {
+ remoteURL = "https://" + remoteURL
+ }
+ root, err := ioutil.TempDir("", "docker-build-git")
+ if err != nil {
+ return "", err
+ }
+
+ u, err := url.Parse(remoteURL)
+ if err != nil {
+ return "", err
+ }
+
+ fragment := u.Fragment
+ clone := cloneArgs(u, root)
+
+ if output, err := git(clone...); err != nil {
+ return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+ }
+
+ return checkoutGit(fragment, root)
+}
+
+func cloneArgs(remoteURL *url.URL, root string) []string {
+ args := []string{"clone", "--recursive"}
+ shallow := len(remoteURL.Fragment) == 0
+
+ if shallow && strings.HasPrefix(remoteURL.Scheme, "http") {
+ res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL))
+ if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" {
+ shallow = false
+ }
+ }
+
+ if shallow {
+ args = append(args, "--depth", "1")
+ }
+
+ if remoteURL.Fragment != "" {
+ remoteURL.Fragment = ""
+ }
+
+ return append(args, remoteURL.String(), root)
+}
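+
+// For example (mirroring TestCloneArgsStripFragment in the accompanying
+// tests), the fragment URL git://github.com/docker/docker#test yields
+//
+//	[]string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"}
+//
+// because the presence of a fragment disables the shallow --depth 1 clone.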
+
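+// checkoutGit handles the optional URL fragment of the form "ref:dir":
+// for instance "master:subdir" checks out branch master and scopes the
+// build context to subdir; either part may be empty (see the tests).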
+func checkoutGit(fragment, root string) (string, error) {
+ refAndDir := strings.SplitN(fragment, ":", 2)
+
+ if len(refAndDir[0]) != 0 {
+ if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil {
+ return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output)
+ }
+ }
+
+ if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
+ newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root)
+ if err != nil {
+ return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err)
+ }
+
+ fi, err := os.Stat(newCtx)
+ if err != nil {
+ return "", err
+ }
+ if !fi.IsDir() {
+ return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx)
+ }
+ root = newCtx
+ }
+
+ return root, nil
+}
+
+func gitWithinDir(dir string, args ...string) ([]byte, error) {
+ a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")}
+ return git(append(a, args...)...)
+}
+
+func git(args ...string) ([]byte, error) {
+ return exec.Command("git", args...).CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go
new file mode 100644
index 00000000000..d197058d20a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/gitutils/gitutils_test.go
@@ -0,0 +1,220 @@
+package gitutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func TestCloneArgsSmartHttp(t *testing.T) {
+ mux := http.NewServeMux()
+ server := httptest.NewServer(mux)
+ serverURL, _ := url.Parse(server.URL)
+
+ serverURL.Path = "/repo.git"
+ gitURL := serverURL.String()
+
+ mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) {
+ q := r.URL.Query().Get("service")
+ w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q))
+ })
+
+ args := cloneArgs(serverURL, "/tmp")
+ exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"}
+ if !reflect.DeepEqual(args, exp) {
+ t.Fatalf("Expected %v, got %v", exp, args)
+ }
+}
+
+func TestCloneArgsDumbHttp(t *testing.T) {
+ mux := http.NewServeMux()
+ server := httptest.NewServer(mux)
+ serverURL, _ := url.Parse(server.URL)
+
+ serverURL.Path = "/repo.git"
+ gitURL := serverURL.String()
+
+ mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/plain")
+ })
+
+ args := cloneArgs(serverURL, "/tmp")
+ exp := []string{"clone", "--recursive", gitURL, "/tmp"}
+ if !reflect.DeepEqual(args, exp) {
+ t.Fatalf("Expected %v, got %v", exp, args)
+ }
+}
+
+func TestCloneArgsGit(t *testing.T) {
+ u, _ := url.Parse("git://github.com/docker/docker")
+ args := cloneArgs(u, "/tmp")
+ exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"}
+ if !reflect.DeepEqual(args, exp) {
+ t.Fatalf("Expected %v, got %v", exp, args)
+ }
+}
+
+func TestCloneArgsStripFragment(t *testing.T) {
+ u, _ := url.Parse("git://github.com/docker/docker#test")
+ args := cloneArgs(u, "/tmp")
+ exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"}
+ if !reflect.DeepEqual(args, exp) {
+ t.Fatalf("Expected %v, got %v", exp, args)
+ }
+}
+
+func gitGetConfig(name string) string {
+	b, err := git("config", "--get", name)
+ if err != nil {
+		// since we are interested in an empty or non-empty string,
+		// we can safely ignore the err here.
+ return ""
+ }
+ return strings.TrimSpace(string(b))
+}
+
+func TestCheckoutGit(t *testing.T) {
+ root, err := ioutil.TempDir("", "docker-build-git-checkout")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(root)
+
+ autocrlf := gitGetConfig("core.autocrlf")
+ if !(autocrlf == "true" || autocrlf == "false" ||
+ autocrlf == "input" || autocrlf == "") {
+ t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf)
+ }
+ eol := "\n"
+ if autocrlf == "true" {
+ eol = "\r\n"
+ }
+
+ gitDir := filepath.Join(root, "repo")
+ _, err = git("init", gitDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ subDir := filepath.Join(gitDir, "subdir")
+ if err = os.Mkdir(subDir, 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if runtime.GOOS != "windows" {
+ if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil {
+ t.Fatal(err)
+ }
+
+ type singleCase struct {
+ frag string
+ exp string
+ fail bool
+ }
+
+ cases := []singleCase{
+ {"", "FROM scratch", false},
+ {"master", "FROM scratch", false},
+ {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false},
+ {":nosubdir", "", true}, // missing directory error
+ {":Dockerfile", "", true}, // not a directory error
+ {"master:nosubdir", "", true},
+ {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false},
+ {"master:../subdir", "", true},
+ {"test", "FROM scratch" + eol + "EXPOSE 3000", false},
+ {"test:", "FROM scratch" + eol + "EXPOSE 3000", false},
+ {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false},
+ }
+
+ if runtime.GOOS != "windows" {
+ // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. Sample output below
+ // git --work-tree .\repo --git-dir .\repo\.git add -A
+ // error: readlink("absolutelink"): Function not implemented
+ // error: unable to index file absolutelink
+ // fatal: adding files failed
+ cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false})
+ cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false})
+ }
+
+ for _, c := range cases {
+ r, err := checkoutGit(c.frag, gitDir)
+
+ fail := err != nil
+ if fail != c.fail {
+ t.Fatalf("Expected %v failure, error was %v\n", c.fail, err)
+ }
+ if c.fail {
+ continue
+ }
+
+ b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != c.exp {
+ t.Fatalf("Expected %v, was %v\n", c.exp, string(b))
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3.go
new file mode 100644
index 00000000000..dbcf44c2568
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3.go
@@ -0,0 +1,15 @@
+// +build cgo
+
+package graphdb
+
+import "database/sql"
+
+// NewSqliteConn opens a connection to a sqlite
+// database.
+func NewSqliteConn(root string) (*Database, error) {
+ conn, err := sql.Open("sqlite3", root)
+ if err != nil {
+ return nil, err
+ }
+ return NewDatabase(conn)
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_unix.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_unix.go
new file mode 100644
index 00000000000..f932fff2867
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_unix.go
@@ -0,0 +1,7 @@
+// +build cgo,!windows
+
+package graphdb
+
+import (
+ _ "github.com/mattn/go-sqlite3" // registers sqlite
+)
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_windows.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_windows.go
new file mode 100644
index 00000000000..52590303d41
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_sqlite3_windows.go
@@ -0,0 +1,7 @@
+// +build cgo,windows
+
+package graphdb
+
+import (
+ _ "github.com/mattn/go-sqlite3" // registers sqlite
+)
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/conn_unsupported.go b/vendor/github.com/docker/docker/pkg/graphdb/conn_unsupported.go
new file mode 100644
index 00000000000..cf977050dad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/conn_unsupported.go
@@ -0,0 +1,8 @@
+// +build !cgo
+
+package graphdb
+
+// NewSqliteConn returns a new sqlite connection.
+func NewSqliteConn(root string) (*Database, error) {
+ panic("Not implemented")
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb.go
new file mode 100644
index 00000000000..eca433fa851
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb.go
@@ -0,0 +1,551 @@
+package graphdb
+
+import (
+ "database/sql"
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+)
+
+const (
+ createEntityTable = `
+ CREATE TABLE IF NOT EXISTS entity (
+ id text NOT NULL PRIMARY KEY
+ );`
+
+ createEdgeTable = `
+ CREATE TABLE IF NOT EXISTS edge (
+ "entity_id" text NOT NULL,
+ "parent_id" text NULL,
+ "name" text NOT NULL,
+ CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"),
+ CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id")
+ );
+ `
+
+ createEdgeIndices = `
+ CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name);
+ `
+)
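+
+// The two tables above form a simple adjacency list: every entity is a row
+// in "entity", and every named parent/child link is a row in "edge". The
+// root entity is created with id "0" under the edge name "/" (see
+// NewDatabase below).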
+
+// Entity with a unique id.
+type Entity struct {
+ id string
+}
+
+// An Edge connects two entities together.
+type Edge struct {
+ EntityID string
+ Name string
+ ParentID string
+}
+
+// Entities stores the list of entities.
+type Entities map[string]*Entity
+
+// Edges stores the relationships between entities.
+type Edges []*Edge
+
+// WalkFunc is a function invoked to process an individual entity.
+type WalkFunc func(fullPath string, entity *Entity) error
+
+// Database is a graph database for storing entities and their relationships.
+type Database struct {
+ conn *sql.DB
+ mux sync.RWMutex
+}
+
+// IsNonUniqueNameError processes the error to check if it's caused by
+// a constraint violation.
+// This is necessary because the error isn't the same across various
+// sqlite versions.
+func IsNonUniqueNameError(err error) bool {
+ str := err.Error()
+ // sqlite 3.7.17-1ubuntu1 returns:
+ // Set failure: Abort due to constraint violation: columns parent_id, name are not unique
+ if strings.HasSuffix(str, "name are not unique") {
+ return true
+ }
+ // sqlite-3.8.3-1.fc20 returns:
+ // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name
+ if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") {
+ return true
+ }
+ // sqlite-3.6.20-1.el6 returns:
+ // Set failure: Abort due to constraint violation: constraint failed
+ if strings.HasSuffix(str, "constraint failed") {
+ return true
+ }
+ return false
+}
+
+// NewDatabase creates a new graph database initialized with a root entity.
+func NewDatabase(conn *sql.DB) (*Database, error) {
+ if conn == nil {
+ return nil, fmt.Errorf("Database connection cannot be nil")
+ }
+ db := &Database{conn: conn}
+
+ // Create root entities
+ tx, err := conn.Begin()
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := tx.Exec(createEntityTable); err != nil {
+ return nil, err
+ }
+ if _, err := tx.Exec(createEdgeTable); err != nil {
+ return nil, err
+ }
+ if _, err := tx.Exec(createEdgeIndices); err != nil {
+ return nil, err
+ }
+
+ if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+
+ if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+
+ if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+
+ if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
+
+// Close the underlying connection to the database.
+func (db *Database) Close() error {
+ return db.conn.Close()
+}
+
+// Set the entity id for a given path.
+func (db *Database) Set(fullPath, id string) (*Entity, error) {
+ db.mux.Lock()
+ defer db.mux.Unlock()
+
+ tx, err := db.conn.Begin()
+ if err != nil {
+ return nil, err
+ }
+
+ var entityID string
+ if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil {
+ if err == sql.ErrNoRows {
+ if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+ } else {
+ tx.Rollback()
+ return nil, err
+ }
+ }
+ e := &Entity{id}
+
+ parentPath, name := splitPath(fullPath)
+ if err := db.setEdge(parentPath, name, e, tx); err != nil {
+ tx.Rollback()
+ return nil, err
+ }
+
+ if err := tx.Commit(); err != nil {
+ return nil, err
+ }
+ return e, nil
+}
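+
+// For example, db.Set("/webapp/db", "2") splits the path into the parent
+// "/webapp" and the edge name "db", inserting entity "2" first if it does
+// not already exist.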
+
+// Exists returns true if a name already exists in the database.
+func (db *Database) Exists(name string) bool {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ e, err := db.get(name)
+ if err != nil {
+ return false
+ }
+ return e != nil
+}
+
+func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error {
+ parent, err := db.get(parentPath)
+ if err != nil {
+ return err
+ }
+ if parent.id == e.id {
+ return fmt.Errorf("Cannot set self as child")
+ }
+
+ if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil {
+ return err
+ }
+ return nil
+}
+
+// RootEntity returns the root "/" entity for the database.
+func (db *Database) RootEntity() *Entity {
+ return &Entity{
+ id: "0",
+ }
+}
+
+// Get returns the entity for a given path.
+func (db *Database) Get(name string) *Entity {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ e, err := db.get(name)
+ if err != nil {
+ return nil
+ }
+ return e
+}
+
+func (db *Database) get(name string) (*Entity, error) {
+ e := db.RootEntity()
+ // We always know the root name so return it if
+ // it is requested
+ if name == "/" {
+ return e, nil
+ }
+
+ parts := split(name)
+ for i := 1; i < len(parts); i++ {
+ p := parts[i]
+ if p == "" {
+ continue
+ }
+
+ next := db.child(e, p)
+ if next == nil {
+ return nil, fmt.Errorf("Cannot find child for %s", name)
+ }
+ e = next
+ }
+ return e, nil
+}
+
+// List all entities starting from the given name.
+// The key will be the full path of the entity.
+func (db *Database) List(name string, depth int) Entities {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ out := Entities{}
+ e, err := db.get(name)
+ if err != nil {
+ return out
+ }
+
+ children, err := db.children(e, name, depth, nil)
+ if err != nil {
+ return out
+ }
+
+ for _, c := range children {
+ out[c.FullPath] = c.Entity
+ }
+ return out
+}
+
+// Walk through the child graph of an entity, calling walkFunc for each child entity.
+// It is safe for walkFunc to call graph functions.
+func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
+ children, err := db.Children(name, depth)
+ if err != nil {
+ return err
+ }
+
+ // Note: the database lock must not be held while calling walkFunc
+ for _, c := range children {
+ if err := walkFunc(c.FullPath, c.Entity); err != nil {
+ return err
+ }
+ }
+ return nil
+}
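+
+// For example, the following visits every entity in the graph:
+//
+//	db.Walk("/", func(p string, e *Entity) error {
+//		fmt.Println(p, e.ID())
+//		return nil
+//	}, -1)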
+
+// Children returns the children of the specified entity.
+func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ e, err := db.get(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return db.children(e, name, depth, nil)
+}
+
+// Parents returns the parents of a specified entity.
+func (db *Database) Parents(name string) ([]string, error) {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ e, err := db.get(name)
+ if err != nil {
+ return nil, err
+ }
+ return db.parents(e)
+}
+
+// Refs returns the reference count for a specified id.
+func (db *Database) Refs(id string) int {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ var count int
+ if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
+ return 0
+ }
+ return count
+}
+
+// RefPaths returns all the id's path references.
+func (db *Database) RefPaths(id string) Edges {
+ db.mux.RLock()
+ defer db.mux.RUnlock()
+
+ refs := Edges{}
+
+ rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id)
+ if err != nil {
+ return refs
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var name string
+ var parentID string
+ if err := rows.Scan(&name, &parentID); err != nil {
+ return refs
+ }
+ refs = append(refs, &Edge{
+ EntityID: id,
+ Name: name,
+ ParentID: parentID,
+ })
+ }
+ return refs
+}
+
+// Delete the reference to an entity at a given path.
+func (db *Database) Delete(name string) error {
+ db.mux.Lock()
+ defer db.mux.Unlock()
+
+ if name == "/" {
+ return fmt.Errorf("Cannot delete root entity")
+ }
+
+ parentPath, n := splitPath(name)
+ parent, err := db.get(parentPath)
+ if err != nil {
+ return err
+ }
+
+ if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Purge removes the entity with the specified id. It walks the graph to
+// make sure all references to the entity are removed, and returns the
+// number of references removed.
+func (db *Database) Purge(id string) (int, error) {
+ db.mux.Lock()
+ defer db.mux.Unlock()
+
+ tx, err := db.conn.Begin()
+ if err != nil {
+ return -1, err
+ }
+
+ // Delete all edges
+ rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id)
+ if err != nil {
+ tx.Rollback()
+ return -1, err
+ }
+ changes, err := rows.RowsAffected()
+ if err != nil {
+ return -1, err
+ }
+
+ // Clear who's using this id as parent
+ refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id)
+ if err != nil {
+ tx.Rollback()
+ return -1, err
+ }
+ refsCount, err := refs.RowsAffected()
+ if err != nil {
+ return -1, err
+ }
+
+ // Delete entity
+ if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil {
+ tx.Rollback()
+ return -1, err
+ }
+
+ if err := tx.Commit(); err != nil {
+ return -1, err
+ }
+
+ return int(changes + refsCount), nil
+}
+
+// Rename an edge for a given path
+func (db *Database) Rename(currentName, newName string) error {
+ db.mux.Lock()
+ defer db.mux.Unlock()
+
+ parentPath, name := splitPath(currentName)
+ newParentPath, newEdgeName := splitPath(newName)
+
+ if parentPath != newParentPath {
+ return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath)
+ }
+
+ parent, err := db.get(parentPath)
+ if err != nil {
+ return err
+ }
+
+ rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name)
+ if err != nil {
+ return err
+ }
+ i, err := rows.RowsAffected()
+ if err != nil {
+ return err
+ }
+ if i == 0 {
+ return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name)
+ }
+ return nil
+}
+
+// WalkMeta stores the walk metadata.
+type WalkMeta struct {
+ Parent *Entity
+ Entity *Entity
+ FullPath string
+ Edge *Edge
+}
+
+func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) {
+ if e == nil {
+ return entities, nil
+ }
+
+ rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var entityID, entityName string
+ if err := rows.Scan(&entityID, &entityName); err != nil {
+ return nil, err
+ }
+ child := &Entity{entityID}
+ edge := &Edge{
+ ParentID: e.id,
+ Name: entityName,
+ EntityID: child.id,
+ }
+
+ meta := WalkMeta{
+ Parent: e,
+ Entity: child,
+ FullPath: path.Join(name, edge.Name),
+ Edge: edge,
+ }
+
+ entities = append(entities, meta)
+
+ if depth != 0 {
+ nDepth := depth
+ if depth != -1 {
+ nDepth--
+ }
+ entities, err = db.children(child, meta.FullPath, nDepth, entities)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return entities, nil
+}
+
+func (db *Database) parents(e *Entity) (parents []string, err error) {
+ if e == nil {
+ return parents, nil
+ }
+
+ rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ for rows.Next() {
+ var parentID string
+ if err := rows.Scan(&parentID); err != nil {
+ return nil, err
+ }
+ parents = append(parents, parentID)
+ }
+
+ return parents, nil
+}
+
+// Return the entity based on the parent path and name.
+func (db *Database) child(parent *Entity, name string) *Entity {
+ var id string
+ if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil {
+ return nil
+ }
+ return &Entity{id}
+}
+
+// ID returns the id used to reference this entity.
+func (e *Entity) ID() string {
+ return e.id
+}
+
+// Paths returns the paths sorted by depth.
+func (e Entities) Paths() []string {
+ out := make([]string, len(e))
+ var i int
+ for k := range e {
+ out[i] = k
+ i++
+ }
+ sortByDepth(out)
+
+ return out
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/graphdb_test.go b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_test.go
new file mode 100644
index 00000000000..f0fb074b4d9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/graphdb_test.go
@@ -0,0 +1,721 @@
+package graphdb
+
+import (
+ "database/sql"
+ "fmt"
+ "os"
+ "path"
+ "runtime"
+ "strconv"
+ "testing"
+
+ _ "github.com/mattn/go-sqlite3"
+)
+
+func newTestDb(t *testing.T) (*Database, string) {
+ p := path.Join(os.TempDir(), "sqlite.db")
+	conn, err := sql.Open("sqlite3", p)
+	if err != nil {
+		t.Fatal(err)
+	}
+	db, err := NewDatabase(conn)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return db, p
+}
+
+func destroyTestDb(dbPath string) {
+ os.Remove(dbPath)
+}
+
+func TestNewDatabase(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ if db == nil {
+ t.Fatal("Database should not be nil")
+ }
+ db.Close()
+ defer destroyTestDb(dbpath)
+}
+
+func TestCreateRootEntity(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+ root := db.RootEntity()
+ if root == nil {
+ t.Fatal("Root entity should not be nil")
+ }
+}
+
+func TestGetRootEntity(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ e := db.Get("/")
+ if e == nil {
+ t.Fatal("Entity should not be nil")
+ }
+ if e.ID() != "0" {
+ t.Fatalf("Entity id should be 0, got %s", e.ID())
+ }
+}
+
+func TestSetEntityWithDifferentName(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/test", "1")
+ if _, err := db.Set("/other", "1"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSetDuplicateEntity(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ if _, err := db.Set("/foo", "42"); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/foo", "43"); err == nil {
+ t.Fatalf("Creating an entry with a duplicate path did not cause an error")
+ }
+}
+
+func TestCreateChild(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ child, err := db.Set("/db", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if child == nil {
+ t.Fatal("Child should not be nil")
+ }
+ if child.ID() != "1" {
+ t.Fail()
+ }
+}
+
+func TestParents(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ for i := 1; i < 6; i++ {
+ a := strconv.Itoa(i)
+ if _, err := db.Set("/"+a, a); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ for i := 6; i < 11; i++ {
+ a := strconv.Itoa(i)
+ p := strconv.Itoa(i - 5)
+
+ key := fmt.Sprintf("/%s/%s", p, a)
+
+ if _, err := db.Set(key, a); err != nil {
+ t.Fatal(err)
+ }
+
+ parents, err := db.Parents(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(parents) != 1 {
+ t.Fatalf("Expected 1 entry for %s got %d", key, len(parents))
+ }
+
+ if parents[0] != p {
+ t.Fatalf("ID %s received, %s expected", parents[0], p)
+ }
+ }
+}
+
+func TestChildren(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ str := "/"
+ for i := 1; i < 6; i++ {
+ a := strconv.Itoa(i)
+ if _, err := db.Set(str+a, a); err != nil {
+ t.Fatal(err)
+ }
+
+ str = str + a + "/"
+ }
+
+ str = "/"
+ for i := 10; i < 30; i++ { // 20 entities
+ a := strconv.Itoa(i)
+ if _, err := db.Set(str+a, a); err != nil {
+ t.Fatal(err)
+ }
+
+ str = str + a + "/"
+ }
+ entries, err := db.Children("/", 5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(entries) != 11 {
+		t.Fatalf("Expected 11 entries for /, got %d", len(entries))
+ }
+
+ entries, err = db.Children("/", 20)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(entries) != 25 {
+		t.Fatalf("Expected 25 entries for /, got %d", len(entries))
+ }
+}
+
+func TestListAllRootChildren(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ for i := 1; i < 6; i++ {
+ a := strconv.Itoa(i)
+ if _, err := db.Set("/"+a, a); err != nil {
+ t.Fatal(err)
+ }
+ }
+ entries := db.List("/", -1)
+ if len(entries) != 5 {
+		t.Fatalf("Expected 5 entries for /, got %d", len(entries))
+ }
+}
+
+func TestListAllSubChildren(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ _, err := db.Set("/webapp", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child2, err := db.Set("/db", "2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child4, err := db.Set("/logs", "4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/db/logs", child4.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child3, err := db.Set("/sentry", "3")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/db", child2.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ entries := db.List("/webapp", 1)
+ if len(entries) != 3 {
+		t.Fatalf("Expected 3 entries for /, got %d", len(entries))
+ }
+
+ entries = db.List("/webapp", 0)
+ if len(entries) != 2 {
+		t.Fatalf("Expected 2 entries for /, got %d", len(entries))
+ }
+}
+
+func TestAddSelfAsChild(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ child, err := db.Set("/test", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/test/other", child.ID()); err == nil {
+ t.Fatal("Error should not be nil")
+ }
+}
+
+func TestAddChildToNonExistentRoot(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ if _, err := db.Set("/myapp", "1"); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := db.Set("/myapp/proxy/db", "2"); err == nil {
+ t.Fatal("Error should not be nil")
+ }
+}
+
+func TestWalkAll(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+ _, err := db.Set("/webapp", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child2, err := db.Set("/db", "2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child4, err := db.Set("/db/logs", "4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/logs", child4.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child3, err := db.Set("/sentry", "3")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/db", child2.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child5, err := db.Set("/gograph", "5")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Walk("/", func(p string, e *Entity) error {
+ t.Logf("Path: %s Entity: %s", p, e.ID())
+ return nil
+ }, -1); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGetEntityByPath(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+ _, err := db.Set("/webapp", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child2, err := db.Set("/db", "2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child4, err := db.Set("/logs", "4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/db/logs", child4.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child3, err := db.Set("/sentry", "3")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/db", child2.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child5, err := db.Set("/gograph", "5")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ entity := db.Get("/webapp/db/logs")
+ if entity == nil {
+ t.Fatal("Entity should not be nil")
+ }
+ if entity.ID() != "4" {
+ t.Fatalf("Expected to get entity with id 4, got %s", entity.ID())
+ }
+}
+
+func TestEntitiesPaths(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+ _, err := db.Set("/webapp", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child2, err := db.Set("/db", "2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child4, err := db.Set("/logs", "4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/db/logs", child4.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child3, err := db.Set("/sentry", "3")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/db", child2.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child5, err := db.Set("/gograph", "5")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ out := db.List("/", -1)
+ for _, p := range out.Paths() {
+ t.Log(p)
+ }
+}
+
+func TestDeleteRootEntity(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ if err := db.Delete("/"); err == nil {
+ t.Fatal("Error should not be nil")
+ }
+}
+
+func TestDeleteEntity(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+ _, err := db.Set("/webapp", "1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child2, err := db.Set("/db", "2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ child4, err := db.Set("/logs", "4")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/db/logs", child4.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child3, err := db.Set("/sentry", "3")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/db", child2.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ child5, err := db.Set("/gograph", "5")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Delete("/webapp/sentry"); err != nil {
+ t.Fatal(err)
+ }
+ entity := db.Get("/webapp/sentry")
+ if entity != nil {
+ t.Fatal("Entity /webapp/sentry should be nil")
+ }
+}
+
+func TestCountRefs(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/webapp", "1")
+
+ if db.Refs("1") != 1 {
+ t.Fatal("Expect reference count to be 1")
+ }
+
+ db.Set("/db", "2")
+ db.Set("/webapp/db", "2")
+ if db.Refs("2") != 2 {
+ t.Fatal("Expect reference count to be 2")
+ }
+}
+
+func TestPurgeId(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/webapp", "1")
+
+ if c := db.Refs("1"); c != 1 {
+ t.Fatalf("Expect reference count to be 1, got %d", c)
+ }
+
+ db.Set("/db", "2")
+ db.Set("/webapp/db", "2")
+
+ count, err := db.Purge("2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if count != 2 {
+ t.Fatalf("Expected 2 references to be removed, got %d", count)
+ }
+}
+
+// Regression test https://github.com/docker/docker/issues/12334
+func TestPurgeIdRefPaths(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/webapp", "1")
+ db.Set("/db", "2")
+
+ db.Set("/db/webapp", "1")
+
+ if c := db.Refs("1"); c != 2 {
+		t.Fatalf("Expected 2 references for webapp, got %d", c)
+ }
+ if c := db.Refs("2"); c != 1 {
+ t.Fatalf("Expected 1 reference for db, got %d", c)
+ }
+
+ if rp := db.RefPaths("2"); len(rp) != 1 {
+ t.Fatalf("Expected 1 reference path for db, got %d", len(rp))
+ }
+
+ count, err := db.Purge("2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if count != 2 {
+ t.Fatalf("Expected 2 rows to be removed, got %d", count)
+ }
+
+ if c := db.Refs("2"); c != 0 {
+		t.Fatalf("Expected 0 references for db, got %d", c)
+ }
+ if c := db.Refs("1"); c != 1 {
+ t.Fatalf("Expected 1 reference for webapp, got %d", c)
+ }
+}
+
+func TestRename(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/webapp", "1")
+
+ if db.Refs("1") != 1 {
+ t.Fatal("Expect reference count to be 1")
+ }
+
+ db.Set("/db", "2")
+ db.Set("/webapp/db", "2")
+
+ if db.Get("/webapp/db") == nil {
+ t.Fatal("Cannot find entity at path /webapp/db")
+ }
+
+ if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil {
+ t.Fatal(err)
+ }
+ if db.Get("/webapp/db") != nil {
+ t.Fatal("Entity should not exist at /webapp/db")
+ }
+ if db.Get("/webapp/newdb") == nil {
+ t.Fatal("Cannot find entity at path /webapp/newdb")
+ }
+}
+
+func TestCreateMultipleNames(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/db", "1")
+ if _, err := db.Set("/myapp", "1"); err != nil {
+ t.Fatal(err)
+ }
+
+ db.Walk("/", func(p string, e *Entity) error {
+ t.Logf("%s\n", p)
+ return nil
+ }, -1)
+}
+
+func TestRefPaths(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/webapp", "1")
+
+ db.Set("/db", "2")
+ db.Set("/webapp/db", "2")
+
+ refs := db.RefPaths("2")
+ if len(refs) != 2 {
+ t.Fatalf("Expected reference count to be 2, got %d", len(refs))
+ }
+}
+
+func TestExistsTrue(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/testing", "1")
+
+ if !db.Exists("/testing") {
+		t.Fatalf("/testing should exist")
+ }
+}
+
+func TestExistsFalse(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/toerhe", "1")
+
+ if db.Exists("/testing") {
+		t.Fatalf("/testing should not exist")
+ }
+}
+
+func TestGetNameWithTrailingSlash(t *testing.T) {
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+ db.Set("/todo", "1")
+
+ e := db.Get("/todo/")
+ if e == nil {
+ t.Fatalf("Entity should not be nil")
+ }
+}
+
+func TestConcurrentWrites(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ db, dbpath := newTestDb(t)
+ defer destroyTestDb(dbpath)
+
+	errs := make(chan error, 3)
+
+	save := func(name string, id string) {
+		if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil {
+			errs <- err
+			return
+		}
+		errs <- nil
+	}
+	purge := func(id string) {
+		if _, err := db.Purge(id); err != nil {
+			errs <- err
+			return
+		}
+		errs <- nil
+	}
+
+	save("1", "1")
+
+	go purge("1")
+	go save("2", "2")
+
+	// one result per call: the synchronous save plus the two goroutines
+	any := false
+	for i := 0; i < 3; i++ {
+ if err := <-errs; err != nil {
+ any = true
+ t.Log(err)
+ }
+ }
+ if any {
+ t.Fail()
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort.go b/vendor/github.com/docker/docker/pkg/graphdb/sort.go
new file mode 100644
index 00000000000..c07df077d8b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/sort.go
@@ -0,0 +1,27 @@
+package graphdb
+
+import "sort"
+
+type pathSorter struct {
+ paths []string
+ by func(i, j string) bool
+}
+
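+// sortByDepth sorts the paths in place from deepest to shallowest, so that,
+// for example, {"/", "/app", "/app/db"} ends up ordered {"/app/db", "/app", "/"}.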
+func sortByDepth(paths []string) {
+ s := &pathSorter{paths, func(i, j string) bool {
+ return PathDepth(i) > PathDepth(j)
+ }}
+ sort.Sort(s)
+}
+
+func (s *pathSorter) Len() int {
+ return len(s.paths)
+}
+
+func (s *pathSorter) Swap(i, j int) {
+ s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
+}
+
+func (s *pathSorter) Less(i, j int) bool {
+ return s.by(s.paths[i], s.paths[j])
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/sort_test.go b/vendor/github.com/docker/docker/pkg/graphdb/sort_test.go
new file mode 100644
index 00000000000..ddf2266f60e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/sort_test.go
@@ -0,0 +1,29 @@
+package graphdb
+
+import (
+ "testing"
+)
+
+func TestSort(t *testing.T) {
+ paths := []string{
+ "/",
+ "/myreallylongname",
+ "/app/db",
+ }
+
+ sortByDepth(paths)
+
+ if len(paths) != 3 {
+ t.Fatalf("Expected 3 parts got %d", len(paths))
+ }
+
+ if paths[0] != "/app/db" {
+ t.Fatalf("Expected /app/db got %s", paths[0])
+ }
+ if paths[1] != "/myreallylongname" {
+ t.Fatalf("Expected /myreallylongname got %s", paths[1])
+ }
+ if paths[2] != "/" {
+ t.Fatalf("Expected / got %s", paths[2])
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/graphdb/utils.go b/vendor/github.com/docker/docker/pkg/graphdb/utils.go
new file mode 100644
index 00000000000..9edd79c35e1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/graphdb/utils.go
@@ -0,0 +1,32 @@
+package graphdb
+
+import (
+ "path"
+ "strings"
+)
+
+// Split p on /
+func split(p string) []string {
+ return strings.Split(p, "/")
+}
+
+// PathDepth returns the depth of a path: the number of "/"-separated
+// components it has, with the root path "/" counting as depth 1.
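+// For example, PathDepth("/") is 1, PathDepth("/app") is 2 and PathDepth("/app/db") is 3.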
+func PathDepth(p string) int {
+ parts := split(p)
+ if len(parts) == 2 && parts[1] == "" {
+ return 1
+ }
+ return len(parts)
+}
+
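+// splitPath splits p into its parent path and final name component; for
+// example, splitPath("/webapp/db") returns ("/webapp", "db").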
+func splitPath(p string) (parent, name string) {
+ if p[0] != '/' {
+ p = "/" + p
+ }
+ parent, name = path.Split(p)
+ l := len(parent)
+ if parent[l-1] == '/' {
+ parent = parent[:l-1]
+ }
+ return
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
similarity index 90%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
rename to vendor/github.com/docker/docker/pkg/homedir/homedir.go
index dcae1788245..8154e83f0c9 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir.go
@@ -4,7 +4,7 @@ import (
"os"
"runtime"
- "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
+ "github.com/opencontainers/runc/libcontainer/user"
)
// Key returns the env var name for the user's home dir based on
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go
new file mode 100644
index 00000000000..7a95cb2bd7d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_test.go
@@ -0,0 +1,24 @@
+package homedir
+
+import (
+ "path/filepath"
+ "testing"
+)
+
+func TestGet(t *testing.T) {
+ home := Get()
+ if home == "" {
+ t.Fatal("returned home directory is empty")
+ }
+
+ if !filepath.IsAbs(home) {
+ t.Fatalf("returned path is not absolute: %s", home)
+ }
+}
+
+func TestGetShortcutString(t *testing.T) {
+ shortcut := GetShortcutString()
+ if shortcut == "" {
+ t.Fatal("returned shortcut string is empty")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils.go b/vendor/github.com/docker/docker/pkg/httputils/httputils.go
new file mode 100644
index 00000000000..d7dc43877df
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/httputils.go
@@ -0,0 +1,56 @@
+package httputils
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strings"
+
+ "github.com/docker/docker/pkg/jsonmessage"
+)
+
+var (
+ headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`)
+ errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`")
+)
+
+// Download requests a given URL and returns an *http.Response; status codes
+// of 400 or above are turned into errors.
+func Download(url string) (resp *http.Response, err error) {
+ if resp, err = http.Get(url); err != nil {
+ return nil, err
+ }
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
+ }
+ return resp, nil
+}
+
+// NewHTTPRequestError returns a JSON response error.
+func NewHTTPRequestError(msg string, res *http.Response) error {
+ return &jsonmessage.JSONError{
+ Message: msg,
+ Code: res.StatusCode,
+ }
+}
+
+// ServerHeader contains the server information.
+type ServerHeader struct {
+ App string // docker
+ Ver string // 1.8.0-dev
+ OS string // windows or linux
+}
+
+// ParseServerHeader extracts pieces from an HTTP server header
+// which is in the format "docker/version (os)", e.g. docker/1.8.0-dev (windows).
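+// For example, "docker/1.8.0-dev (linux)" parses to App "docker", Ver "1.8.0-dev" and OS "linux".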
+func ParseServerHeader(hdr string) (*ServerHeader, error) {
+ matches := headerRegexp.FindStringSubmatch(hdr)
+ if len(matches) != 4 {
+ return nil, errInvalidHeader
+ }
+ return &ServerHeader{
+ App: strings.TrimSpace(matches[1]),
+ Ver: strings.TrimSpace(matches[2]),
+ OS: strings.TrimSpace(matches[3]),
+ }, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go
new file mode 100644
index 00000000000..d35d0821567
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/httputils_test.go
@@ -0,0 +1,115 @@
+package httputils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestDownload(t *testing.T) {
+ expected := "Hello, docker !"
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, expected)
+ }))
+ defer ts.Close()
+ response, err := Download(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ actual, err := ioutil.ReadAll(response.Body)
+ response.Body.Close()
+
+ if err != nil || string(actual) != expected {
+ t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual))
+ }
+}
+
+func TestDownload400Errors(t *testing.T) {
+ expectedError := "Got HTTP status code >= 400: 403 Forbidden"
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // 403
+ http.Error(w, "something failed (forbidden)", http.StatusForbidden)
+ }))
+ defer ts.Close()
+ // Expected status code = 403
+ if _, err := Download(ts.URL); err == nil || err.Error() != expectedError {
+		t.Fatalf("Expected the error %q, got %v", expectedError, err)
+ }
+}
+
+func TestDownloadOtherErrors(t *testing.T) {
+ if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") {
+ t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err)
+ }
+}
+
+func TestNewHTTPRequestError(t *testing.T) {
+ errorMessage := "Some error message"
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // 403
+ http.Error(w, errorMessage, http.StatusForbidden)
+ }))
+ defer ts.Close()
+ httpResponse, err := http.Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage {
+ t.Fatalf("Expected err to be %q, got %v", errorMessage, err)
+ }
+}
+
+func TestParseServerHeader(t *testing.T) {
+ inputs := map[string][]string{
+ "bad header": {"error"},
+ "(bad header)": {"error"},
+ "(without/spaces)": {"error"},
+ "(header/with spaces)": {"error"},
+ "foo/bar (baz)": {"foo", "bar", "baz"},
+ "foo/bar": {"error"},
+ "foo": {"error"},
+ "foo/bar (baz space)": {"foo", "bar", "baz space"},
+ " f f / b b ( b s ) ": {"f f", "b b", "b s"},
+ "foo/bar (baz) ignore": {"foo", "bar", "baz"},
+ "foo/bar ()": {"error"},
+ "foo/bar()": {"error"},
+ "foo/bar(baz)": {"foo", "bar", "baz"},
+ "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"},
+ "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"},
+ "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"},
+ }
+
+ for header, values := range inputs {
+ serverHeader, err := ParseServerHeader(header)
+ if err != nil {
+ if err != errInvalidHeader {
+ t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err)
+ }
+ if values[0] == "error" {
+ continue
+ }
+ t.Fatalf("Header %q failed to parse when it shouldn't have", header)
+ }
+ if values[0] == "error" {
+			t.Fatalf("Header %q parsed ok when it should have failed (%q).", header, serverHeader)
+ }
+
+ if serverHeader.App != values[0] {
+ t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App)
+ }
+
+ if serverHeader.Ver != values[1] {
+ t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver)
+ }
+
+ if serverHeader.OS != values[2] {
+ t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS)
+ }
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go
new file mode 100644
index 00000000000..d5cf34e4f20
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype.go
@@ -0,0 +1,30 @@
+package httputils
+
+import (
+ "mime"
+ "net/http"
+)
+
+// MimeTypes stores the MIME content type.
+var MimeTypes = struct {
+ TextPlain string
+ Tar string
+ OctetStream string
+}{"text/plain", "application/tar", "application/octet-stream"}
+
+// DetectContentType returns a best guess representation of the MIME
+// content type for the bytes at c. The value detected by
+// http.DetectContentType is guaranteed not to be nil, defaulting to
+// application/octet-stream when a better guess cannot be made. The
+// result of this detection is then run through mime.ParseMediaType()
+// which separates the actual MIME string from any parameters.
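+// For example, plain ASCII input is detected as "text/plain" with a single
+// "charset" argument of "utf-8".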
+func DetectContentType(c []byte) (string, map[string]string, error) {
+
+ ct := http.DetectContentType(c)
+ contentType, args, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return contentType, args, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go
new file mode 100644
index 00000000000..9de433ee8cc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/mimetype_test.go
@@ -0,0 +1,13 @@
+package httputils
+
+import (
+ "testing"
+)
+
+func TestDetectContentType(t *testing.T) {
+ input := []byte("That is just a plain text")
+
+ if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" {
+ t.Errorf("TestDetectContentType failed")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go
new file mode 100644
index 00000000000..bebc8608cd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader.go
@@ -0,0 +1,95 @@
+package httputils
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+)
+
+type resumableRequestReader struct {
+ client *http.Client
+ request *http.Request
+ lastRange int64
+ totalSize int64
+ currentResponse *http.Response
+ failures uint32
+ maxFailures uint32
+}
+
+// ResumableRequestReader makes it possible to resume reading a request's body transparently.
+// maxfail is the number of times we retry the whole request (not resumes);
+// totalsize is the total length of the body, auto-detected when 0 is passed.
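+//
+// A minimal use (sketch), mirroring the tests for this package:
+//
+//	req, _ := http.NewRequest("GET", url, nil)
+//	body := ResumableRequestReader(http.DefaultClient, req, 5, 0)
+//	defer body.Close()
+//	data, err := ioutil.ReadAll(body) // resumes transparently on transient failures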
+func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser {
+ return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize}
+}
+
+// ResumableRequestReaderWithInitialResponse makes it possible to resume
+// reading the body of an already initiated request.
+func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser {
+ return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse}
+}
+
+func (r *resumableRequestReader) Read(p []byte) (n int, err error) {
+ if r.client == nil || r.request == nil {
+ return 0, fmt.Errorf("client and request can't be nil\n")
+ }
+ isFreshRequest := false
+ if r.lastRange != 0 && r.currentResponse == nil {
+ readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize)
+ r.request.Header.Set("Range", readRange)
+ time.Sleep(5 * time.Second)
+ }
+ if r.currentResponse == nil {
+ r.currentResponse, err = r.client.Do(r.request)
+ isFreshRequest = true
+ }
+ if err != nil && r.failures+1 != r.maxFailures {
+ r.cleanUpResponse()
+ r.failures++
+ time.Sleep(5 * time.Duration(r.failures) * time.Second)
+ return 0, nil
+ } else if err != nil {
+ r.cleanUpResponse()
+ return 0, err
+ }
+ if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 {
+ r.cleanUpResponse()
+ return 0, io.EOF
+ } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest {
+ r.cleanUpResponse()
+ return 0, fmt.Errorf("the server doesn't support byte ranges")
+ }
+ if r.totalSize == 0 {
+ r.totalSize = r.currentResponse.ContentLength
+ } else if r.totalSize <= 0 {
+ r.cleanUpResponse()
+ return 0, fmt.Errorf("failed to auto detect content length")
+ }
+ n, err = r.currentResponse.Body.Read(p)
+ r.lastRange += int64(n)
+ if err != nil {
+ r.cleanUpResponse()
+ }
+ if err != nil && err != io.EOF {
+ logrus.Infof("encountered error during pull and clearing it before resume: %s", err)
+ err = nil
+ }
+ return n, err
+}
+
+func (r *resumableRequestReader) Close() error {
+ r.cleanUpResponse()
+ r.client = nil
+ r.request = nil
+ return nil
+}
+
+func (r *resumableRequestReader) cleanUpResponse() {
+ if r.currentResponse != nil {
+ r.currentResponse.Body.Close()
+ r.currentResponse = nil
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go
new file mode 100644
index 00000000000..5a2906db773
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/httputils/resumablerequestreader_test.go
@@ -0,0 +1,307 @@
+package httputils
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+)
+
+func TestResumableRequestHeaderSimpleErrors(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, "Hello, world !")
+ }))
+ defer ts.Close()
+
+ client := &http.Client{}
+
+ var req *http.Request
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedError := "client and request can't be nil\n"
+ resreq := &resumableRequestReader{}
+ _, err = resreq.Read([]byte{})
+ if err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected an error with '%s', got %v.", expectedError, err)
+ }
+
+ resreq = &resumableRequestReader{
+ client: client,
+ request: req,
+ totalSize: -1,
+ }
+ expectedError = "failed to auto detect content length"
+ _, err = resreq.Read([]byte{})
+ if err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected an error with '%s', got %v.", expectedError, err)
+ }
+
+}
+
+// Not too many failures; bails out after some wait
+func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) {
+ client := &http.Client{}
+
+ var badReq *http.Request
+ badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resreq := &resumableRequestReader{
+ client: client,
+ request: badReq,
+ failures: 0,
+ maxFailures: 2,
+ }
+ read, err := resreq.Read([]byte{})
+ if err != nil || read != 0 {
+ t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read)
+ }
+}
+
+// Too many failures; returns the error
+func TestResumableRequestHeaderTooMuchFailures(t *testing.T) {
+ client := &http.Client{}
+
+ var badReq *http.Request
+ badReq, err := http.NewRequest("GET", "I'm not an url", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resreq := &resumableRequestReader{
+ client: client,
+ request: badReq,
+ failures: 0,
+ maxFailures: 1,
+ }
+ defer resreq.Close()
+
+ expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""`
+ read, err := resreq.Read([]byte{})
+ if err == nil || err.Error() != expectedError || read != 0 {
+ t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read)
+ }
+}
+
+type errorReaderCloser struct{}
+
+func (errorReaderCloser) Close() error { return nil }
+
+func (errorReaderCloser) Read(p []byte) (n int, err error) {
+ return 0, fmt.Errorf("An error occurred")
+}
+
+// If an unknown error is encountered, return 0, nil and log it
+func TestResumableRequestReaderWithReadError(t *testing.T) {
+ var req *http.Request
+ req, err := http.NewRequest("GET", "", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+
+ response := &http.Response{
+ Status: "500 Internal Server",
+ StatusCode: 500,
+ ContentLength: 0,
+ Close: true,
+ Body: errorReaderCloser{},
+ }
+
+ resreq := &resumableRequestReader{
+ client: client,
+ request: req,
+ currentResponse: response,
+ lastRange: 1,
+ totalSize: 1,
+ }
+ defer resreq.Close()
+
+ buf := make([]byte, 1)
+ read, err := resreq.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if read != 0 {
+ t.Fatalf("Expected to have read nothing, but read %v", read)
+ }
+}
+
+func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) {
+ var req *http.Request
+ req, err := http.NewRequest("GET", "", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+
+ response := &http.Response{
+ Status: "416 Requested Range Not Satisfiable",
+ StatusCode: 416,
+ ContentLength: 0,
+ Close: true,
+ Body: ioutil.NopCloser(strings.NewReader("")),
+ }
+
+ resreq := &resumableRequestReader{
+ client: client,
+ request: req,
+ currentResponse: response,
+ lastRange: 1,
+ totalSize: 1,
+ }
+ defer resreq.Close()
+
+ buf := make([]byte, 1)
+ _, err = resreq.Read(buf)
+ if err == nil || err != io.EOF {
+ t.Fatalf("Expected an io.EOF error, got %v", err)
+ }
+}
+
+func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Range") == "" {
+ t.Fatalf("Expected a Range HTTP header, got nothing")
+ }
+ }))
+ defer ts.Close()
+
+ var req *http.Request
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+
+ resreq := &resumableRequestReader{
+ client: client,
+ request: req,
+ lastRange: 1,
+ }
+ defer resreq.Close()
+
+ buf := make([]byte, 2)
+ _, err = resreq.Read(buf)
+ if err == nil || err.Error() != "the server doesn't support byte ranges" {
+ t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err)
+ }
+}
+
+func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) {
+
+ srvtxt := "some response text data"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, srvtxt)
+ }))
+ defer ts.Close()
+
+ var req *http.Request
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+ retries := uint32(5)
+
+ resreq := ResumableRequestReader(client, req, retries, 0)
+ defer resreq.Close()
+
+ data, err := ioutil.ReadAll(resreq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resstr := strings.TrimSuffix(string(data), "\n")
+
+ if resstr != srvtxt {
+ t.Errorf("resstr != srvtxt")
+ }
+}
+
+func TestResumableRequestReader(t *testing.T) {
+
+ srvtxt := "some response text data"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, srvtxt)
+ }))
+ defer ts.Close()
+
+ var req *http.Request
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+ retries := uint32(5)
+ imgSize := int64(len(srvtxt))
+
+ resreq := ResumableRequestReader(client, req, retries, imgSize)
+ defer resreq.Close()
+
+ data, err := ioutil.ReadAll(resreq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resstr := strings.TrimSuffix(string(data), "\n")
+
+ if resstr != srvtxt {
+ t.Errorf("resstr != srvtxt")
+ }
+}
+
+func TestResumableRequestReaderWithInitialResponse(t *testing.T) {
+
+ srvtxt := "some response text data"
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintln(w, srvtxt)
+ }))
+ defer ts.Close()
+
+ var req *http.Request
+ req, err := http.NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ client := &http.Client{}
+ retries := uint32(5)
+ imgSize := int64(len(srvtxt))
+
+ res, err := client.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res)
+ defer resreq.Close()
+
+ data, err := ioutil.ReadAll(resreq)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resstr := strings.TrimSuffix(string(data), "\n")
+
+ if resstr != srvtxt {
+ t.Errorf("resstr != srvtxt")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
new file mode 100644
index 00000000000..6bca466286f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go
@@ -0,0 +1,197 @@
+package idtools
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IDMap contains a single entry for user namespace range remapping. An array
+// of IDMap entries represents the structure that will be provided to the Linux
+// kernel for creating a user namespace.
+type IDMap struct {
+ ContainerID int `json:"container_id"`
+ HostID int `json:"host_id"`
+ Size int `json:"size"`
+}
+
+type subIDRange struct {
+ Start int
+ Length int
+}
+
+type ranges []subIDRange
+
+func (e ranges) Len() int { return len(e) }
+func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
+func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
+
+const (
+ subuidFileName string = "/etc/subuid"
+ subgidFileName string = "/etc/subgid"
+)
+
+// MkdirAllAs creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. If the directory already exists, this
+// function will still change ownership to the requested uid/gid pair.
+func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
+}
+
+// MkdirAllNewAs creates a directory (including any along the path) and then modifies
+// ownership ONLY of newly created directories to the requested uid/gid. If the
+// directories along the path exist, no change of ownership will be performed
+func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
+}
+
+// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
+// If the directory already exists, this function still changes ownership
+func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
+ return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
+}
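+
+// For example, MkdirAllAs("/var/lib/test/a/b", 0755, 100000, 100000) creates any
+// missing directories in the chain and chowns the target plus the directories it
+// created to 100000:100000, whereas MkdirAllNewAs leaves a pre-existing target's
+// ownership unchanged.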
+
+// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ var uid, gid int
+
+ if uidMap != nil {
+ xUID, err := ToHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ uid = xUID
+ }
+ if gidMap != nil {
+ xGID, err := ToHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid = xGID
+ }
+ return uid, gid, nil
+}
+
+// ToContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
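+// For example, with the mapping {ContainerID: 0, HostID: 100000, Size: 65536},
+// ToContainer(100005, idMap) returns 5.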
+func ToContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
+ contID := m.ContainerID + (hostID - m.HostID)
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// ToHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
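+// For example, with the mapping {ContainerID: 0, HostID: 100000, Size: 65536},
+// ToHost(5, idMap) returns 100005.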
+func ToHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
+ hostID := m.HostID + (contID - m.ContainerID)
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
+}
+
+// CreateIDMappings takes a requested user and group name and
+// using the data from /etc/sub{uid,gid} ranges, creates the
+// proper uid and gid remapping ranges for that user/group pair
+func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
+ subuidRanges, err := parseSubuid(username)
+ if err != nil {
+ return nil, nil, err
+ }
+ subgidRanges, err := parseSubgid(groupname)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(subuidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
+ }
+ if len(subgidRanges) == 0 {
+ return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
+ }
+
+ return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+ idMap := []IDMap{}
+
+ // sort the ranges by lowest ID first
+ sort.Sort(subidRanges)
+ containerID := 0
+ for _, idrange := range subidRanges {
+ idMap = append(idMap, IDMap{
+ ContainerID: containerID,
+ HostID: idrange.Start,
+ Size: idrange.Length,
+ })
+ containerID = containerID + idrange.Length
+ }
+ return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+ return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+ return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+ var rangeList ranges
+
+ subidFile, err := os.Open(path)
+ if err != nil {
+ return rangeList, err
+ }
+ defer subidFile.Close()
+
+ s := bufio.NewScanner(subidFile)
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return rangeList, err
+ }
+
+ text := strings.TrimSpace(s.Text())
+ if text == "" || strings.HasPrefix(text, "#") {
+ continue
+ }
+ parts := strings.Split(text, ":")
+ if len(parts) != 3 {
+ return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+ }
+ if parts[0] == username || username == "ALL" {
+ startid, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ length, err := strconv.Atoi(parts[2])
+ if err != nil {
+ return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+ }
+ rangeList = append(rangeList, subIDRange{startid, length})
+ }
+ }
+ return rangeList, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
new file mode 100644
index 00000000000..b57d6ef125e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package idtools
+
+import (
+ "os"
+ "path/filepath"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If chownExisting is false, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+ paths = []string{path}
+ } else if err == nil && chownExisting {
+ if err := os.Chown(path, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ // short-circuit--we were called with an existing directory and chown was requested
+ return nil
+ } else if err == nil {
+ // nothing to do; directory path fully exists already and chown was NOT requested
+ return nil
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ } else {
+ if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go
new file mode 100644
index 00000000000..540d3079ee2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix_test.go
@@ -0,0 +1,271 @@
+// +build !windows
+
+package idtools
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+)
+
+type node struct {
+ uid int
+ gid int
+}
+
+func TestMkdirAllAs(t *testing.T) {
+ dirName, err := ioutil.TempDir("", "mkdirall")
+ if err != nil {
+ t.Fatalf("Couldn't create temp dir: %v", err)
+ }
+ defer os.RemoveAll(dirName)
+
+ testTree := map[string]node{
+ "usr": {0, 0},
+ "usr/bin": {0, 0},
+ "lib": {33, 33},
+ "lib/x86_64": {45, 45},
+ "lib/x86_64/share": {1, 1},
+ }
+
+ if err := buildTree(dirName, testTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid
+ if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil {
+ t.Fatal(err)
+ }
+ testTree["usr/share"] = node{99, 99}
+ verifyTree, err := readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test 2-deep new directories--both should be owned by the uid/gid pair
+ if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil {
+ t.Fatal(err)
+ }
+ testTree["lib/some"] = node{101, 101}
+ testTree["lib/some/other"] = node{101, 101}
+ verifyTree, err = readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test a directory that already exists; should be chowned, but nothing else
+ if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil {
+ t.Fatal(err)
+ }
+ testTree["usr"] = node{102, 102}
+ verifyTree, err = readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMkdirAllNewAs(t *testing.T) {
+
+ dirName, err := ioutil.TempDir("", "mkdirnew")
+ if err != nil {
+ t.Fatalf("Couldn't create temp dir: %v", err)
+ }
+ defer os.RemoveAll(dirName)
+
+ testTree := map[string]node{
+ "usr": {0, 0},
+ "usr/bin": {0, 0},
+ "lib": {33, 33},
+ "lib/x86_64": {45, 45},
+ "lib/x86_64/share": {1, 1},
+ }
+
+ if err := buildTree(dirName, testTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid
+ if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil {
+ t.Fatal(err)
+ }
+ testTree["usr/share"] = node{99, 99}
+ verifyTree, err := readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test 2-deep new directories--both should be owned by the uid/gid pair
+ if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil {
+ t.Fatal(err)
+ }
+ testTree["lib/some"] = node{101, 101}
+ testTree["lib/some/other"] = node{101, 101}
+ verifyTree, err = readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test a directory that already exists; should NOT be chowned
+ if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil {
+ t.Fatal(err)
+ }
+ verifyTree, err = readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMkdirAs(t *testing.T) {
+
+ dirName, err := ioutil.TempDir("", "mkdir")
+ if err != nil {
+ t.Fatalf("Couldn't create temp dir: %v", err)
+ }
+ defer os.RemoveAll(dirName)
+
+ testTree := map[string]node{
+ "usr": {0, 0},
+ }
+ if err := buildTree(dirName, testTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // test a directory that already exists; should just chown to the requested uid/gid
+ if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil {
+ t.Fatal(err)
+ }
+ testTree["usr"] = node{99, 99}
+ verifyTree, err := readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+
+ // create a subdir under a dir which doesn't exist--should fail
+ if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil {
+ t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed")
+ }
+
+ // create a subdir under an existing dir; should only change the ownership of the new subdir
+ if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil {
+ t.Fatal(err)
+ }
+ testTree["usr/bin"] = node{102, 102}
+ verifyTree, err = readTree(dirName, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := compareTrees(testTree, verifyTree); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func buildTree(base string, tree map[string]node) error {
+ for path, node := range tree {
+ fullPath := filepath.Join(base, path)
+ if err := os.MkdirAll(fullPath, 0755); err != nil {
+ return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err)
+ }
+ if err := os.Chown(fullPath, node.uid, node.gid); err != nil {
+ return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err)
+ }
+ }
+ return nil
+}
+
+func readTree(base, root string) (map[string]node, error) {
+ tree := make(map[string]node)
+
+ dirInfos, err := ioutil.ReadDir(base)
+ if err != nil {
+ return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err)
+ }
+
+ for _, info := range dirInfos {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(filepath.Join(base, info.Name()), s); err != nil {
+ return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err)
+ }
+ tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)}
+ if info.IsDir() {
+ // read the subdirectory
+ subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name()))
+ if err != nil {
+ return nil, err
+ }
+ for path, nodeinfo := range subtree {
+ tree[path] = nodeinfo
+ }
+ }
+ }
+ return tree, nil
+}
+
+func compareTrees(left, right map[string]node) error {
+ if len(left) != len(right) {
+ return fmt.Errorf("Trees aren't the same size")
+ }
+ for path, nodeLeft := range left {
+ if nodeRight, ok := right[path]; ok {
+ if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid {
+ // mismatch
+ return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path,
+ nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid)
+ }
+ continue
+ }
+ return fmt.Errorf("right tree didn't contain path %q", path)
+ }
+ return nil
+}
+
+func TestParseSubidFileWithNewlinesAndComments(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "parsesubid")
+ if err != nil {
+ t.Fatal(err)
+ }
+ fnamePath := filepath.Join(tmpDir, "testsubuid")
+ fcontent := `tss:100000:65536
+# empty default subuid/subgid file
+
+dockremap:231072:65536`
+ if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil {
+ t.Fatal(err)
+ }
+ ranges, err := parseSubidFile(fnamePath, "dockremap")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(ranges) != 1 {
+ t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges))
+ }
+ if ranges[0].Start != 231072 {
+ t.Fatalf("wanted 231072, got %d instead", ranges[0].Start)
+ }
+ if ranges[0].Length != 65536 {
+ t.Fatalf("wanted 65536, got %d instead", ranges[0].Length)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
new file mode 100644
index 00000000000..c9e3c937cdc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package idtools
+
+import (
+ "os"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// Platforms such as Windows do not support the UID/GID concept, so mkdirAs is
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+ if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
new file mode 100644
index 00000000000..4a4aaed04d0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
@@ -0,0 +1,188 @@
+package idtools
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// add a user and/or group to Linux /etc/passwd, /etc/group using standard
+// Linux distribution commands:
+// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group
+// useradd -r -s /bin/false
+
+var (
+ once sync.Once
+ userCommand string
+
+ cmdTemplates = map[string]string{
+ "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
+ "useradd": "-r -s /bin/false %s",
+ "usermod": "-%s %d-%d %s",
+ }
+
+ idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
+ // default length for a UID/GID subordinate range
+ defaultRangeLen = 65536
+ defaultRangeStart = 100000
+ userMod = "usermod"
+)
+
+func resolveBinary(binname string) (string, error) {
+ binaryPath, err := exec.LookPath(binname)
+ if err != nil {
+ return "", err
+ }
+ resolvedPath, err := filepath.EvalSymlinks(binaryPath)
+ if err != nil {
+ return "", err
+ }
+	// only return no error if the final resolved binary basename
+	// matches what was searched for
+ if filepath.Base(resolvedPath) == binname {
+ return resolvedPath, nil
+ }
+ return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
+}
+
+// AddNamespaceRangesUser takes a username and uses the standard system
+// utility to create a system user/group pair used to hold the
+// /etc/sub{uid,gid} ranges which will be used for user namespace
+// mapping ranges in containers.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ if err := addUser(name); err != nil {
+ return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
+ }
+
+ // Query the system for the created uid and gid pair
+ out, err := execCmd("id", name)
+ if err != nil {
+ return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
+ }
+ matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
+ if len(matches) != 3 {
+ return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
+ }
+ uid, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
+ }
+ gid, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
+ }
+
+ // Now we need to create the subuid/subgid ranges for our new user/group (system users
+ // do not get auto-created ranges in subuid/subgid)
+
+ if err := createSubordinateRanges(name); err != nil {
+ return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
+ }
+ return uid, gid, nil
+}
+
+func addUser(userName string) error {
+ once.Do(func() {
+ // set up which commands are used for adding users/groups dependent on distro
+ if _, err := resolveBinary("adduser"); err == nil {
+ userCommand = "adduser"
+ } else if _, err := resolveBinary("useradd"); err == nil {
+ userCommand = "useradd"
+ }
+ })
+ if userCommand == "" {
+ return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
+ }
+ args := fmt.Sprintf(cmdTemplates[userCommand], userName)
+ out, err := execCmd(userCommand, args)
+ if err != nil {
+ return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
+ }
+ return nil
+}
+
+func createSubordinateRanges(name string) error {
+
+ // first, we should verify that ranges weren't automatically created
+ // by the distro tooling
+ ranges, err := parseSubuid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no UID ranges; let's create one
+ startID, err := findNextUIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subuid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+
+ ranges, err = parseSubgid(name)
+ if err != nil {
+ return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
+ }
+ if len(ranges) == 0 {
+ // no GID ranges; let's create one
+ startID, err := findNextGIDRange()
+ if err != nil {
+ return fmt.Errorf("Can't find available subgid range: %v", err)
+ }
+ out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
+ if err != nil {
+ return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
+ }
+ }
+ return nil
+}
+
+func findNextUIDRange() (int, error) {
+ ranges, err := parseSubuid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextGIDRange() (int, error) {
+ ranges, err := parseSubgid("ALL")
+ if err != nil {
+ return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
+ }
+ sort.Sort(ranges)
+ return findNextRangeStart(ranges)
+}
+
+func findNextRangeStart(rangeList ranges) (int, error) {
+ startID := defaultRangeStart
+ for _, arange := range rangeList {
+ if wouldOverlap(arange, startID) {
+ startID = arange.Start + arange.Length
+ }
+ }
+ return startID, nil
+}
+
+func wouldOverlap(arange subIDRange, ID int) bool {
+ low := ID
+ high := ID + defaultRangeLen
+ if (low >= arange.Start && low <= arange.Start+arange.Length) ||
+ (high <= arange.Start+arange.Length && high >= arange.Start) {
+ return true
+ }
+ return false
+}
+
+func execCmd(cmd, args string) ([]byte, error) {
+ execCmd := exec.Command(cmd, strings.Split(args, " ")...)
+ return execCmd.CombinedOutput()
+}
diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
new file mode 100644
index 00000000000..d98b354cbd8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux
+
+package idtools
+
+import "fmt"
+
+// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
+// and calls the appropriate helper function to add the group and then
+// the user to the group in /etc/group and /etc/passwd respectively.
+func AddNamespaceRangesUser(name string) (int, int, error) {
+ return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
+}
diff --git a/vendor/github.com/docker/docker/pkg/integration/checker/checker.go b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go
new file mode 100644
index 00000000000..d1b703a599f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/integration/checker/checker.go
@@ -0,0 +1,46 @@
+// Package checker provides Docker specific implementations of the go-check.Checker interface.
+package checker
+
+import (
+ "github.com/go-check/check"
+ "github.com/vdemeester/shakers"
+)
+
+// As a convenience, we bring all check.Checker variables into the current namespace to avoid having
+// to think about check.X versus checker.X.
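+// For example, a test can simply write c.Assert(out, checker.Contains, "substring").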
+var (
+ DeepEquals = check.DeepEquals
+ ErrorMatches = check.ErrorMatches
+ FitsTypeOf = check.FitsTypeOf
+ HasLen = check.HasLen
+ Implements = check.Implements
+ IsNil = check.IsNil
+ Matches = check.Matches
+ Not = check.Not
+ NotNil = check.NotNil
+ PanicMatches = check.PanicMatches
+ Panics = check.Panics
+
+ Contains = shakers.Contains
+ ContainsAny = shakers.ContainsAny
+ Count = shakers.Count
+ Equals = shakers.Equals
+ EqualFold = shakers.EqualFold
+ False = shakers.False
+ GreaterOrEqualThan = shakers.GreaterOrEqualThan
+ GreaterThan = shakers.GreaterThan
+ HasPrefix = shakers.HasPrefix
+ HasSuffix = shakers.HasSuffix
+ Index = shakers.Index
+ IndexAny = shakers.IndexAny
+ IsAfter = shakers.IsAfter
+ IsBefore = shakers.IsBefore
+ IsBetween = shakers.IsBetween
+ IsLower = shakers.IsLower
+ IsUpper = shakers.IsUpper
+ LessOrEqualThan = shakers.LessOrEqualThan
+ LessThan = shakers.LessThan
+ TimeEquals = shakers.TimeEquals
+ True = shakers.True
+ TimeIgnore = shakers.TimeIgnore
+)
diff --git a/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils.go b/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils.go
new file mode 100644
index 00000000000..fab3e062ddc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils.go
@@ -0,0 +1,78 @@
+package integration
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+// We quote error returns with a literal quote character rather than with
+// strconv.Quote or %q in fmt.Errorf, because those escape characters. That
+// has a big downside on Windows, where the args include paths: instead
+// of something like c:\directory\file.txt, the output would be
+// c:\\directory\\file.txt, which is highly misleading.
+const quote = `"`
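+
+// Illustrative comparison of the two quoting styles:
+//
+//	fmt.Errorf("%q failed", `c:\dir\file.txt`)                 // "c:\\dir\\file.txt" failed
+//	fmt.Errorf(quote+"%v"+quote+" failed", `c:\dir\file.txt`)  // "c:\dir\file.txt" failed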
+
+var execCommand = exec.Command
+
+// DockerCmdWithError executes a docker command that is supposed to fail and returns
+// the output, the exit code and the error.
+func DockerCmdWithError(dockerBinary string, args ...string) (string, int, error) {
+ return RunCommandWithOutput(execCommand(dockerBinary, args...))
+}
+
+// DockerCmdWithStdoutStderr executes a docker command and returns the content of the
+// stdout, stderr and the exit code. If a check.C is passed, it will fail and stop tests
+// if the error is not nil.
+func DockerCmdWithStdoutStderr(dockerBinary string, c *check.C, args ...string) (string, string, int) {
+ stdout, stderr, status, err := RunCommandWithStdoutStderr(execCommand(dockerBinary, args...))
+ if c != nil {
+ c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), stderr, err))
+ }
+ return stdout, stderr, status
+}
+
+// DockerCmd executes a docker command and returns the output and the exit code. If the
+// command returns an error, it will fail and stop the tests.
+func DockerCmd(dockerBinary string, c *check.C, args ...string) (string, int) {
+ out, status, err := RunCommandWithOutput(execCommand(dockerBinary, args...))
+ c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), out, err))
+ return out, status
+}
+
+// DockerCmdWithTimeout executes a docker command with a timeout, and returns the output,
+// the exit code and the error (if any).
+func DockerCmdWithTimeout(dockerBinary string, timeout time.Duration, args ...string) (string, int, error) {
+ out, status, err := RunCommandWithOutputAndTimeout(execCommand(dockerBinary, args...), timeout)
+ if err != nil {
+ return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
+
+// DockerCmdInDir executes a docker command in a directory and returns the output, the
+// exit code and the error (if any).
+func DockerCmdInDir(dockerBinary string, path string, args ...string) (string, int, error) {
+ dockerCommand := execCommand(dockerBinary, args...)
+ dockerCommand.Dir = path
+ out, status, err := RunCommandWithOutput(dockerCommand)
+ if err != nil {
+ return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
+
+// DockerCmdInDirWithTimeout executes a docker command in a directory with a timeout and
+// returns the output, the exit code and the error (if any).
+func DockerCmdInDirWithTimeout(dockerBinary string, timeout time.Duration, path string, args ...string) (string, int, error) {
+ dockerCommand := execCommand(dockerBinary, args...)
+ dockerCommand.Dir = path
+ out, status, err := RunCommandWithOutputAndTimeout(dockerCommand, timeout)
+ if err != nil {
+ return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out)
+ }
+ return out, status, err
+}
diff --git a/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils_test.go b/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils_test.go
new file mode 100644
index 00000000000..3dd5d11461e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/integration/dockerCmd_utils_test.go
@@ -0,0 +1,405 @@
+package integration
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "testing"
+
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "github.com/go-check/check"
+)
+
+const dockerBinary = "docker"
+
+// Setup go-check for this test
+func Test(t *testing.T) {
+ check.TestingT(t)
+}
+
+func init() {
+ check.Suite(&DockerCmdSuite{})
+}
+
+type DockerCmdSuite struct{}
+
+// Fake the exec.Command to use our mock.
+func (s *DockerCmdSuite) SetUpTest(c *check.C) {
+ execCommand = fakeExecCommand
+}
+
+// And bring it back to normal after the test.
+func (s *DockerCmdSuite) TearDownTest(c *check.C) {
+ execCommand = exec.Command
+}
+
+// DockerCmdWithError tests
+
+func (s *DockerCmdSuite) TestDockerCmdWithError(c *check.C) {
+ cmds := []struct {
+ binary string
+ args []string
+ expectedOut string
+ expectedExitCode int
+ expectedError error
+ }{
+ {
+ "doesnotexists",
+ []string{},
+ "Command doesnotexists not found.",
+ 1,
+ fmt.Errorf("exit status 1"),
+ },
+ {
+ dockerBinary,
+ []string{"an", "error"},
+ "an error has occurred",
+ 1,
+ fmt.Errorf("exit status 1"),
+ },
+ {
+ dockerBinary,
+ []string{"an", "exitCode", "127"},
+ "an error has occurred with exitCode 127",
+ 127,
+ fmt.Errorf("exit status 127"),
+ },
+ {
+ dockerBinary,
+ []string{"run", "-ti", "ubuntu", "echo", "hello"},
+ "hello",
+ 0,
+ nil,
+ },
+ }
+ for _, cmd := range cmds {
+ out, exitCode, error := DockerCmdWithError(cmd.binary, cmd.args...)
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out))
+ c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode))
+ if cmd.expectedError != nil {
+ c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError))
+ c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error()))
+ } else {
+ c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error))
+ }
+ }
+}
+
+// DockerCmdWithStdoutStderr tests
+
+type dockerCmdWithStdoutStderrErrorSuite struct{}
+
+func (s *dockerCmdWithStdoutStderrErrorSuite) Test(c *check.C) {
+ // Should fail, the test too
+ DockerCmdWithStdoutStderr(dockerBinary, c, "an", "error")
+}
+
+type dockerCmdWithStdoutStderrSuccessSuite struct{}
+
+func (s *dockerCmdWithStdoutStderrSuccessSuite) Test(c *check.C) {
+ stdout, stderr, exitCode := DockerCmdWithStdoutStderr(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello")
+ c.Assert(stdout, check.Equals, "hello")
+ c.Assert(stderr, check.Equals, "")
+ c.Assert(exitCode, check.Equals, 0)
+
+}
+
+func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrError(c *check.C) {
+ // Run error suite, should fail.
+ output := String{}
+ result := check.Run(&dockerCmdWithStdoutStderrErrorSuite{}, &check.RunConf{Output: &output})
+ c.Check(result.Succeeded, check.Equals, 0)
+ c.Check(result.Failed, check.Equals, 1)
+}
+
+func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrSuccess(c *check.C) {
+	// Run success suite, should succeed.
+ output := String{}
+ result := check.Run(&dockerCmdWithStdoutStderrSuccessSuite{}, &check.RunConf{Output: &output})
+ c.Check(result.Succeeded, check.Equals, 1)
+ c.Check(result.Failed, check.Equals, 0)
+}
+
+// DockerCmd tests
+
+type dockerCmdErrorSuite struct{}
+
+func (s *dockerCmdErrorSuite) Test(c *check.C) {
+ // Should fail, the test too
+ DockerCmd(dockerBinary, c, "an", "error")
+}
+
+type dockerCmdSuccessSuite struct{}
+
+func (s *dockerCmdSuccessSuite) Test(c *check.C) {
+ stdout, exitCode := DockerCmd(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello")
+ c.Assert(stdout, check.Equals, "hello")
+ c.Assert(exitCode, check.Equals, 0)
+
+}
+
+func (s *DockerCmdSuite) TestDockerCmdError(c *check.C) {
+ // Run error suite, should fail.
+ output := String{}
+ result := check.Run(&dockerCmdErrorSuite{}, &check.RunConf{Output: &output})
+ c.Check(result.Succeeded, check.Equals, 0)
+ c.Check(result.Failed, check.Equals, 1)
+}
+
+func (s *DockerCmdSuite) TestDockerCmdSuccess(c *check.C) {
+	// Run success suite, should succeed.
+ output := String{}
+ result := check.Run(&dockerCmdSuccessSuite{}, &check.RunConf{Output: &output})
+ c.Check(result.Succeeded, check.Equals, 1)
+ c.Check(result.Failed, check.Equals, 0)
+}
+
+// DockerCmdWithTimeout tests
+
+func (s *DockerCmdSuite) TestDockerCmdWithTimeout(c *check.C) {
+ cmds := []struct {
+ binary string
+ args []string
+ timeout time.Duration
+ expectedOut string
+ expectedExitCode int
+ expectedError error
+ }{
+ {
+ "doesnotexists",
+ []string{},
+ 200 * time.Millisecond,
+ `Command doesnotexists not found.`,
+ 1,
+ fmt.Errorf(`"" failed with errors: exit status 1 : "Command doesnotexists not found."`),
+ },
+ {
+ dockerBinary,
+ []string{"an", "error"},
+ 200 * time.Millisecond,
+ `an error has occurred`,
+ 1,
+ fmt.Errorf(`"an error" failed with errors: exit status 1 : "an error has occurred"`),
+ },
+ {
+ dockerBinary,
+ []string{"a", "command", "that", "times", "out"},
+ 5 * time.Millisecond,
+ "",
+ 0,
+ fmt.Errorf(`"a command that times out" failed with errors: command timed out : ""`),
+ },
+ {
+ dockerBinary,
+ []string{"run", "-ti", "ubuntu", "echo", "hello"},
+ 200 * time.Millisecond,
+ "hello",
+ 0,
+ nil,
+ },
+ }
+ for _, cmd := range cmds {
+ out, exitCode, error := DockerCmdWithTimeout(cmd.binary, cmd.timeout, cmd.args...)
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out))
+ c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode))
+ if cmd.expectedError != nil {
+ c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError))
+ c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error()))
+ } else {
+ c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error))
+ }
+ }
+}
+
+// DockerCmdInDir tests
+
+func (s *DockerCmdSuite) TestDockerCmdInDir(c *check.C) {
+ tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir")
+ c.Assert(err, check.IsNil)
+
+ cmds := []struct {
+ binary string
+ args []string
+ expectedOut string
+ expectedExitCode int
+ expectedError error
+ }{
+ {
+ "doesnotexists",
+ []string{},
+ `Command doesnotexists not found.`,
+ 1,
+ fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder),
+ },
+ {
+ dockerBinary,
+ []string{"an", "error"},
+ `an error has occurred`,
+ 1,
+ fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder),
+ },
+ {
+ dockerBinary,
+ []string{"run", "-ti", "ubuntu", "echo", "hello"},
+ "hello",
+ 0,
+ nil,
+ },
+ }
+ for _, cmd := range cmds {
+		// We prepend the arguments with dir:thefolder; the fake command will check
+		// that the current workdir is the same as the one we are passing.
+ args := append([]string{"dir:" + tempFolder}, cmd.args...)
+ out, exitCode, error := DockerCmdInDir(cmd.binary, tempFolder, args...)
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out))
+ c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode))
+ if cmd.expectedError != nil {
+ c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError))
+ c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error()))
+ } else {
+ c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error))
+ }
+ }
+}
+
+// DockerCmdInDirWithTimeout tests
+
+func (s *DockerCmdSuite) TestDockerCmdInDirWithTimeout(c *check.C) {
+ tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir")
+ c.Assert(err, check.IsNil)
+
+ cmds := []struct {
+ binary string
+ args []string
+ timeout time.Duration
+ expectedOut string
+ expectedExitCode int
+ expectedError error
+ }{
+ {
+ "doesnotexists",
+ []string{},
+ 200 * time.Millisecond,
+ `Command doesnotexists not found.`,
+ 1,
+ fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder),
+ },
+ {
+ dockerBinary,
+ []string{"an", "error"},
+ 200 * time.Millisecond,
+ `an error has occurred`,
+ 1,
+ fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder),
+ },
+ {
+ dockerBinary,
+ []string{"a", "command", "that", "times", "out"},
+ 5 * time.Millisecond,
+ "",
+ 0,
+ fmt.Errorf(`"dir:%s a command that times out" failed with errors: command timed out : ""`, tempFolder),
+ },
+ {
+ dockerBinary,
+ []string{"run", "-ti", "ubuntu", "echo", "hello"},
+ 200 * time.Millisecond,
+ "hello",
+ 0,
+ nil,
+ },
+ }
+ for _, cmd := range cmds {
+		// We prepend the arguments with dir:thefolder; the fake command will check
+		// that the current workdir is the same as the one we are passing.
+ args := append([]string{"dir:" + tempFolder}, cmd.args...)
+ out, exitCode, error := DockerCmdInDirWithTimeout(cmd.binary, cmd.timeout, tempFolder, args...)
+ c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out))
+ c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode))
+ if cmd.expectedError != nil {
+ c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError))
+ c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error()))
+ } else {
+ c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error))
+ }
+ }
+}
+
+// Helpers :)
+
+// String implements the io.Writer interface for analyzing output.
+type String struct {
+ value string
+}
+
+// Write is the only function required by the io.Writer interface. It appends
+// the written data to the String.value string.
+func (s *String) Write(p []byte) (n int, err error) {
+ s.value += string(p)
+ return len(p), nil
+}
+
+// Helper function that mocks the exec.Command call (and calls the test binary)
+func fakeExecCommand(command string, args ...string) *exec.Cmd {
+ cs := []string{"-test.run=TestHelperProcess", "--", command}
+ cs = append(cs, args...)
+ cmd := exec.Command(os.Args[0], cs...)
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ return cmd
+}
+
+func TestHelperProcess(t *testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ args := os.Args
+
+	// The preceding arguments are test-harness plumbing that looks like:
+ // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
+ cmd, args := args[3], args[4:]
+ // Handle the case where args[0] is dir:...
+ if len(args) > 0 && strings.HasPrefix(args[0], "dir:") {
+ expectedCwd := args[0][4:]
+ if len(args) > 1 {
+ args = args[1:]
+ }
+ cwd, err := os.Getwd()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to get workingdir: %v", err)
+ os.Exit(1)
+ }
+		// This checks that the given path is the same as the current working dir
+ if expectedCwd != cwd {
+ fmt.Fprintf(os.Stderr, "Current workdir should be %q, but is %q", expectedCwd, cwd)
+ }
+ }
+ switch cmd {
+ case dockerBinary:
+ argsStr := strings.Join(args, " ")
+ switch argsStr {
+ case "an exitCode 127":
+ fmt.Fprintf(os.Stderr, "an error has occurred with exitCode 127")
+ os.Exit(127)
+ case "an error":
+ fmt.Fprintf(os.Stderr, "an error has occurred")
+ os.Exit(1)
+ case "a command that times out":
+ time.Sleep(10 * time.Second)
+ fmt.Fprintf(os.Stdout, "too long, should be killed")
+			// A random exit code (that should never happen in tests)
+ os.Exit(7)
+ case "run -ti ubuntu echo hello":
+ fmt.Fprintf(os.Stdout, "hello")
+ default:
+ fmt.Fprintf(os.Stdout, "no arguments")
+ }
+ default:
+ fmt.Fprintf(os.Stderr, "Command %s not found.", cmd)
+ os.Exit(1)
+ }
+ os.Exit(0)
+}
diff --git a/vendor/github.com/docker/docker/pkg/integration/utils.go b/vendor/github.com/docker/docker/pkg/integration/utils.go
new file mode 100644
index 00000000000..cfccc80143b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/integration/utils.go
@@ -0,0 +1,361 @@
+package integration
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/docker/docker/pkg/stringutils"
+)
+
+// GetExitCode returns the ExitStatus of the specified error if its type is
+// exec.ExitError, returns 0 and an error otherwise.
+func GetExitCode(err error) (int, error) {
+ exitCode := 0
+ if exiterr, ok := err.(*exec.ExitError); ok {
+ if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+ return procExit.ExitStatus(), nil
+ }
+ }
+ return exitCode, fmt.Errorf("failed to get exit code")
+}
+
+// ProcessExitCode processes the specified error and returns the exit status code
+// if the error was of type exec.ExitError; it returns 127 when the exit code
+// cannot be retrieved, and 0 when err is nil.
+func ProcessExitCode(err error) (exitCode int) {
+ if err != nil {
+ var exiterr error
+ if exitCode, exiterr = GetExitCode(err); exiterr != nil {
+ // TODO: Fix this so we check the error's text.
+ // we've failed to retrieve exit code, so we set it to 127
+ exitCode = 127
+ }
+ }
+ return
+}
+
+// IsKilled processes the specified error and reports whether the process was killed.
+func IsKilled(err error) bool {
+ if exitErr, ok := err.(*exec.ExitError); ok {
+ status, ok := exitErr.Sys().(syscall.WaitStatus)
+ if !ok {
+ return false
+ }
+		// status.ExitStatus() is required on Windows because it implements
+		// neither Signal() nor Signaled(). Just checking for a bad exit
+		// status could mean it was killed (and in tests we do kill)
+ return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0
+ }
+ return false
+}
+
+// RunCommandWithOutput runs the specified command and returns the combined output (stdout/stderr),
+// the exit code (non-zero on failure) and the error if something went wrong
+func RunCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
+ exitCode = 0
+ out, err := cmd.CombinedOutput()
+ exitCode = ProcessExitCode(err)
+ output = string(out)
+ return
+}
+
+// RunCommandWithStdoutStderr runs the specified command and returns stdout and stderr separately,
+// the exit code (non-zero on failure) and the error if something went wrong
+func RunCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {
+ var (
+ stderrBuffer, stdoutBuffer bytes.Buffer
+ )
+ exitCode = 0
+ cmd.Stderr = &stderrBuffer
+ cmd.Stdout = &stdoutBuffer
+ err = cmd.Run()
+ exitCode = ProcessExitCode(err)
+
+ stdout = stdoutBuffer.String()
+ stderr = stderrBuffer.String()
+ return
+}
+
+// RunCommandWithOutputForDuration runs the specified command "timeboxed" by the specified duration.
+// If the process is still running when the timebox expires, it is killed.
+// It returns the output, the exit code (non-zero on failure), a boolean indicating
+// whether the command timed out, and the error if something went wrong.
+func RunCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) {
+ var outputBuffer bytes.Buffer
+ if cmd.Stdout != nil {
+ err = errors.New("cmd.Stdout already set")
+ return
+ }
+ cmd.Stdout = &outputBuffer
+
+ if cmd.Stderr != nil {
+ err = errors.New("cmd.Stderr already set")
+ return
+ }
+ cmd.Stderr = &outputBuffer
+
+	// Start the command in the main goroutine.
+	err = cmd.Start()
+	if err != nil {
+		err = fmt.Errorf("failed to start command %v: %v", cmd, err)
+		return
+	}
+
+ type exitInfo struct {
+ exitErr error
+ exitCode int
+ }
+
+ done := make(chan exitInfo, 1)
+
+ go func() {
+ // And wait for it to exit in the goroutine :)
+ info := exitInfo{}
+ info.exitErr = cmd.Wait()
+ info.exitCode = ProcessExitCode(info.exitErr)
+ done <- info
+ }()
+
+ select {
+ case <-time.After(duration):
+ killErr := cmd.Process.Kill()
+ if killErr != nil {
+ fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr)
+ }
+ timedOut = true
+ case info := <-done:
+ err = info.exitErr
+ exitCode = info.exitCode
+ }
+ output = outputBuffer.String()
+ return
+}
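+
+// Usage sketch: give a long-running command a 50ms budget and expect the
+// timedOut flag to be set (err stays nil on a plain timeout):
+//
+//	out, code, timedOut, err := RunCommandWithOutputForDuration(
+//		exec.Command("sleep", "10"), 50*time.Millisecond)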
+
+var errCmdTimeout = fmt.Errorf("command timed out")
+
+// RunCommandWithOutputAndTimeout runs the specified command "timeboxed" by the specified duration.
+// It returns the output, the exit code (non-zero on failure) and the error if something
+// went wrong or if the process timed out (in which case it has been killed).
+func RunCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) {
+ var timedOut bool
+ output, exitCode, timedOut, err = RunCommandWithOutputForDuration(cmd, timeout)
+ if timedOut {
+ err = errCmdTimeout
+ }
+ return
+}
+
+// RunCommand runs the specified command and returns the exit code (non-zero on failure)
+// and the error if something went wrong.
+func RunCommand(cmd *exec.Cmd) (exitCode int, err error) {
+ exitCode = 0
+ err = cmd.Run()
+ exitCode = ProcessExitCode(err)
+ return
+}
+
+// RunCommandPipelineWithOutput runs the array of commands with the output
+// of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do).
+// It returns the final output, the exitCode different from 0 and the error
+// if something bad happened.
+func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) {
+ if len(cmds) < 2 {
+ return "", 0, errors.New("pipeline does not have multiple cmds")
+ }
+
+ // connect stdin of each cmd to stdout pipe of previous cmd
+ for i, cmd := range cmds {
+ if i > 0 {
+ prevCmd := cmds[i-1]
+ cmd.Stdin, err = prevCmd.StdoutPipe()
+
+ if err != nil {
+ return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err)
+ }
+ }
+ }
+
+ // start all cmds except the last
+ for _, cmd := range cmds[:len(cmds)-1] {
+ if err = cmd.Start(); err != nil {
+ return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err)
+ }
+ }
+
+	defer func() {
+		// wait for all cmds except the last so they release their resources;
+		// surface the first wait failure through the named error return
+		for _, cmd := range cmds[:len(cmds)-1] {
+			if waitErr := cmd.Wait(); waitErr != nil {
+				if err == nil {
+					err = fmt.Errorf("command %s failed with error: %v", cmd.Path, waitErr)
+				}
+				break
+			}
+		}
+	}()
+
+ // wait on last cmd
+ return RunCommandWithOutput(cmds[len(cmds)-1])
+}
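+
+// For example (the unit test below does exactly this):
+//
+//	out, _, _ := RunCommandPipelineWithOutput(
+//		exec.Command("echo", "-n", "11"),
+//		exec.Command("wc", "-m"))
+//	// out == "2\n"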
+
+// UnmarshalJSON deserializes JSON data into the given interface.
+func UnmarshalJSON(data []byte, result interface{}) error {
+ if err := json.Unmarshal(data, result); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ConvertSliceOfStringsToMap converts a slice of strings into a map
+// with the strings as keys and empty structs as values.
+func ConvertSliceOfStringsToMap(input []string) map[string]struct{} {
+ output := make(map[string]struct{})
+ for _, v := range input {
+ output[v] = struct{}{}
+ }
+ return output
+}
+
+// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory)
+// and returns an error if different.
+func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
+ var (
+ e1Entries = make(map[string]struct{})
+ e2Entries = make(map[string]struct{})
+ )
+ for _, e := range e1 {
+ e1Entries[e.Name()] = struct{}{}
+ }
+ for _, e := range e2 {
+ e2Entries[e.Name()] = struct{}{}
+ }
+ if !reflect.DeepEqual(e1Entries, e2Entries) {
+ return fmt.Errorf("entries differ")
+ }
+ return nil
+}
+
+// ListTar lists the entries of a tar.
+func ListTar(f io.Reader) ([]string, error) {
+ tr := tar.NewReader(f)
+ var entries []string
+
+ for {
+ th, err := tr.Next()
+ if err == io.EOF {
+ // end of tar archive
+ return entries, nil
+ }
+ if err != nil {
+ return entries, err
+ }
+ entries = append(entries, th.Name)
+ }
+}
+
+// RandomTmpDirPath provides a temporary path with a random string appended.
+// It neither creates the path nor checks whether it exists.
+func RandomTmpDirPath(s string, platform string) string {
+ tmp := "/tmp"
+ if platform == "windows" {
+ tmp = os.Getenv("TEMP")
+ }
+ path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
+ if platform == "windows" {
+ return filepath.FromSlash(path) // Using \
+ }
+ return filepath.ToSlash(path) // Using /
+}
+
+// ConsumeWithSpeed reads chunkSize bytes from reader, sleeping for interval
+// between reads. It returns the total number of bytes read. Send true on the
+// stop channel to return before reading to EOF on the reader.
+func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
+ buffer := make([]byte, chunkSize)
+ for {
+ var readBytes int
+ readBytes, err = reader.Read(buffer)
+ n += readBytes
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ }
+ return
+ }
+ select {
+ case <-stop:
+ return
+ case <-time.After(interval):
+ }
+ }
+}
+
+// ParseCgroupPaths parses 'procCgroupData', which is the output of '/proc/<pid>/cgroup', and
+// returns a map with the cgroup name as key and the path as value.
+func ParseCgroupPaths(procCgroupData string) map[string]string {
+ cgroupPaths := map[string]string{}
+ for _, line := range strings.Split(procCgroupData, "\n") {
+ parts := strings.Split(line, ":")
+ if len(parts) != 3 {
+ continue
+ }
+ cgroupPaths[parts[1]] = parts[2]
+ }
+ return cgroupPaths
+}
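+
+// For example (mirrored by the unit tests):
+//
+//	ParseCgroupPaths("2:memory:/a\n1:cpuset:/b")
+//	// => map[string]string{"memory": "/a", "cpuset": "/b"}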
+
+// ChannelBuffer holds a channel of byte slices that can be populated from a goroutine.
+type ChannelBuffer struct {
+ C chan []byte
+}
+
+// Write implements Writer.
+func (c *ChannelBuffer) Write(b []byte) (int, error) {
+ c.C <- b
+ return len(b), nil
+}
+
+// Close closes the go channel.
+func (c *ChannelBuffer) Close() error {
+ close(c.C)
+ return nil
+}
+
+// ReadTimeout reads the content of the channel into the given byte slice,
+// using the specified duration as timeout.
+func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
+ select {
+ case b := <-c.C:
+ return copy(p[0:], b), nil
+ case <-time.After(n):
+ return -1, fmt.Errorf("timeout reading from channel")
+ }
+}
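+
+// Usage sketch (someProducer is a placeholder io.Reader):
+//
+//	cb := &ChannelBuffer{C: make(chan []byte, 1)}
+//	go io.Copy(cb, someProducer)             // fill from a goroutine
+//	p := make([]byte, 32)
+//	n, err := cb.ReadTimeout(p, time.Second) // bounded wait for data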
+
+// RunAtDifferentDate runs the specified function with the given time.
+// It changes the date of the system, which can lead to weird behaviors.
+func RunAtDifferentDate(date time.Time, block func()) {
+ // Layout for date. MMDDhhmmYYYY
+ const timeLayout = "010203042006"
+ // Ensure we bring time back to now
+ now := time.Now().Format(timeLayout)
+ dateReset := exec.Command("date", now)
+ defer RunCommand(dateReset)
+
+ dateChange := exec.Command("date", date.Format(timeLayout))
+ RunCommand(dateChange)
+ block()
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/integration/utils_test.go b/vendor/github.com/docker/docker/pkg/integration/utils_test.go
new file mode 100644
index 00000000000..b354ab932d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/integration/utils_test.go
@@ -0,0 +1,572 @@
+package integration
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestIsKilledFalseWithNonKilledProcess(t *testing.T) {
+ var lsCmd *exec.Cmd
+ if runtime.GOOS != "windows" {
+ lsCmd = exec.Command("ls")
+ } else {
+ lsCmd = exec.Command("cmd", "/c", "dir")
+ }
+
+ err := lsCmd.Run()
+ if IsKilled(err) {
+ t.Fatalf("Expected the ls command to not be killed, was.")
+ }
+}
+
+func TestIsKilledTrueWithKilledProcess(t *testing.T) {
+ var longCmd *exec.Cmd
+ if runtime.GOOS != "windows" {
+ longCmd = exec.Command("top")
+ } else {
+ longCmd = exec.Command("powershell", "while ($true) { sleep 1 }")
+ }
+
+ // Start a command
+ err := longCmd.Start()
+ if err != nil {
+ t.Fatal(err)
+ }
+ // Capture the error when *dying*
+ done := make(chan error, 1)
+ go func() {
+ done <- longCmd.Wait()
+ }()
+ // Then kill it
+ longCmd.Process.Kill()
+ // Get the error
+ err = <-done
+ if !IsKilled(err) {
+ t.Fatalf("Expected the command to be killed, was not.")
+ }
+}
+
+func TestRunCommandWithOutput(t *testing.T) {
+ var (
+ echoHelloWorldCmd *exec.Cmd
+ expected string
+ )
+ if runtime.GOOS != "windows" {
+ echoHelloWorldCmd = exec.Command("echo", "hello", "world")
+ expected = "hello world\n"
+ } else {
+ echoHelloWorldCmd = exec.Command("cmd", "/s", "/c", "echo", "hello", "world")
+ expected = "hello world\r\n"
+ }
+
+ out, exitCode, err := RunCommandWithOutput(echoHelloWorldCmd)
+ if out != expected || exitCode != 0 || err != nil {
+ t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expected, out, err, exitCode)
+ }
+}
+
+func TestRunCommandWithOutputError(t *testing.T) {
+ var (
+ p string
+ wrongCmd *exec.Cmd
+ expected string
+ expectedExitCode int
+ )
+
+ if runtime.GOOS != "windows" {
+ p = "$PATH"
+ wrongCmd = exec.Command("ls", "-z")
+ expected = `ls: invalid option -- 'z'
+Try 'ls --help' for more information.
+`
+ expectedExitCode = 2
+ } else {
+ p = "%PATH%"
+ wrongCmd = exec.Command("cmd", "/s", "/c", "dir", "/Z")
+ expected = "Invalid switch - " + strconv.Quote("Z") + ".\r\n"
+ expectedExitCode = 1
+ }
+ cmd := exec.Command("doesnotexists")
+ out, exitCode, err := RunCommandWithOutput(cmd)
+ expectedError := `exec: "doesnotexists": executable file not found in ` + p
+ if out != "" || exitCode != 127 || err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expectedError, out, err, exitCode)
+ }
+
+ out, exitCode, err = RunCommandWithOutput(wrongCmd)
+
+ if out != expected || exitCode != expectedExitCode || err == nil || !strings.Contains(err.Error(), "exit status "+strconv.Itoa(expectedExitCode)) {
+ t.Fatalf("Expected command to output %s, got out:xxx%sxxx, err:%v with exitCode %v", expected, out, err, exitCode)
+ }
+}
+
+func TestRunCommandWithStdoutStderr(t *testing.T) {
+ echoHelloWorldCmd := exec.Command("echo", "hello", "world")
+ stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(echoHelloWorldCmd)
+ expected := "hello world\n"
+ if stdout != expected || stderr != "" || exitCode != 0 || err != nil {
+ t.Fatalf("Expected command to output %s, got stdout:%s, stderr:%s, err:%v with exitCode %v", expected, stdout, stderr, err, exitCode)
+ }
+}
+
+func TestRunCommandWithStdoutStderrError(t *testing.T) {
+ p := "$PATH"
+ if runtime.GOOS == "windows" {
+ p = "%PATH%"
+ }
+ cmd := exec.Command("doesnotexists")
+ stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(cmd)
+ expectedError := `exec: "doesnotexists": executable file not found in ` + p
+ if stdout != "" || stderr != "" || exitCode != 127 || err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", "", stdout, stderr, err, exitCode)
+ }
+
+ wrongLsCmd := exec.Command("ls", "-z")
+ expected := `ls: invalid option -- 'z'
+Try 'ls --help' for more information.
+`
+
+ stdout, stderr, exitCode, err = RunCommandWithStdoutStderr(wrongLsCmd)
+ if stdout != "" && stderr != expected || exitCode != 2 || err == nil || err.Error() != "exit status 2" {
+ t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", expectedError, stdout, stderr, err, exitCode)
+ }
+}
+
+func TestRunCommandWithOutputForDurationFinished(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ cmd := exec.Command("ls")
+ out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 50*time.Millisecond)
+ if out == "" || exitCode != 0 || timedOut || err != nil {
+ t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], timedOut:[%v], err:[%v]", out, exitCode, timedOut, err)
+ }
+}
+
+func TestRunCommandWithOutputForDurationKilled(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+ cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done")
+ out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 500*time.Millisecond)
+ ones := strings.Split(out, "\n")
+ if len(ones) != 6 || exitCode != 0 || !timedOut || err != nil {
+ t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out, but did not : out:[%s], exitCode:%d, timedOut:%v, err:%v", out, exitCode, timedOut, err)
+ }
+}
+
+func TestRunCommandWithOutputForDurationErrors(t *testing.T) {
+ cmd := exec.Command("ls")
+ cmd.Stdout = os.Stdout
+ if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" {
+ t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err)
+ }
+ cmd = exec.Command("ls")
+ cmd.Stderr = os.Stderr
+ if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" {
+ t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err)
+ }
+}
+
+func TestRunCommandWithOutputAndTimeoutFinished(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ cmd := exec.Command("ls")
+ out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 50*time.Millisecond)
+ if out == "" || exitCode != 0 || err != nil {
+ t.Fatalf("Expected the command to run for less 50 milliseconds and thus not time out, but did not : out:[%s], exitCode:[%d], err:[%v]", out, exitCode, err)
+ }
+}
+
+func TestRunCommandWithOutputAndTimeoutKilled(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done")
+ out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 500*time.Millisecond)
+ ones := strings.Split(out, "\n")
+ if len(ones) != 6 || exitCode != 0 || err == nil || err.Error() != "command timed out" {
+ t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines (five with 1, one empty) and time out with an error 'command timed out', but did not : out:[%s], exitCode:%d, err:%v", out, exitCode, err)
+ }
+}
+
+func TestRunCommandWithOutputAndTimeoutErrors(t *testing.T) {
+ cmd := exec.Command("ls")
+ cmd.Stdout = os.Stdout
+ if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" {
+ t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err)
+ }
+ cmd = exec.Command("ls")
+ cmd.Stderr = os.Stderr
+ if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" {
+ t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err)
+ }
+}
+
+func TestRunCommand(t *testing.T) {
+ // TODO Windows: Port this test
+ if runtime.GOOS == "windows" {
+ t.Skip("Needs porting to Windows")
+ }
+
+ p := "$PATH"
+ if runtime.GOOS == "windows" {
+ p = "%PATH%"
+ }
+ lsCmd := exec.Command("ls")
+ exitCode, err := RunCommand(lsCmd)
+ if exitCode != 0 || err != nil {
+ t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err)
+ }
+
+ var expectedError string
+
+ exitCode, err = RunCommand(exec.Command("doesnotexists"))
+ expectedError = `exec: "doesnotexists": executable file not found in ` + p
+ if exitCode != 127 || err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err)
+ }
+ wrongLsCmd := exec.Command("ls", "-z")
+ expected := 2
+ expectedError = `exit status 2`
+ exitCode, err = RunCommand(wrongLsCmd)
+ if exitCode != expected || err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err)
+ }
+}
+
+func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) {
+ _, _, err := RunCommandPipelineWithOutput(exec.Command("ls"))
+ expectedError := "pipeline does not have multiple cmds"
+ if err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected an error with %s, got err:%s", expectedError, err)
+ }
+}
+
+func TestRunCommandPipelineWithOutputErrors(t *testing.T) {
+ p := "$PATH"
+ if runtime.GOOS == "windows" {
+ p = "%PATH%"
+ }
+ cmd1 := exec.Command("ls")
+ cmd1.Stdout = os.Stdout
+ cmd2 := exec.Command("anything really")
+ _, _, err := RunCommandPipelineWithOutput(cmd1, cmd2)
+ if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" {
+ t.Fatalf("Expected an error, got %v", err)
+ }
+
+ cmdWithError := exec.Command("doesnotexists")
+ cmdCat := exec.Command("cat")
+ _, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat)
+ if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p {
+ t.Fatalf("Expected an error, got %v", err)
+ }
+}
+
+func TestRunCommandPipelineWithOutput(t *testing.T) {
+ cmds := []*exec.Cmd{
+ // Print 2 characters
+ exec.Command("echo", "-n", "11"),
+		// Count the number of chars from stdin (previous command)
+ exec.Command("wc", "-m"),
+ }
+ out, exitCode, err := RunCommandPipelineWithOutput(cmds...)
+ expectedOutput := "2\n"
+ if out != expectedOutput || exitCode != 0 || err != nil {
+ t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err)
+ }
+}
+
+// A simple test, as it is just a passthrough for json.Unmarshal
+func TestUnmarshalJSON(t *testing.T) {
+ emptyResult := struct{}{}
+ if err := UnmarshalJSON([]byte(""), &emptyResult); err == nil {
+ t.Fatalf("Expected an error, got nothing")
+ }
+ result := struct{ Name string }{}
+ if err := UnmarshalJSON([]byte(`{"name": "name"}`), &result); err != nil {
+ t.Fatal(err)
+ }
+ if result.Name != "name" {
+ t.Fatalf("Expected result.name to be 'name', was '%s'", result.Name)
+ }
+}
+
+func TestConvertSliceOfStringsToMap(t *testing.T) {
+ input := []string{"a", "b"}
+ actual := ConvertSliceOfStringsToMap(input)
+ for _, key := range input {
+ if _, ok := actual[key]; !ok {
+ t.Fatalf("Expected output to contains key %s, did not: %v", key, actual)
+ }
+ }
+}
+
+func TestCompareDirectoryEntries(t *testing.T) {
+ tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+
+ file1 := filepath.Join(tmpFolder, "file1")
+ file2 := filepath.Join(tmpFolder, "file2")
+ os.Create(file1)
+ os.Create(file2)
+
+ fi1, err := os.Stat(file1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fi1bis, err := os.Stat(file1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fi2, err := os.Stat(file2)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cases := []struct {
+ e1 []os.FileInfo
+ e2 []os.FileInfo
+ shouldError bool
+ }{
+ // Empty directories
+ {
+ []os.FileInfo{},
+ []os.FileInfo{},
+ false,
+ },
+ // Same FileInfos
+ {
+ []os.FileInfo{fi1},
+ []os.FileInfo{fi1},
+ false,
+ },
+ // Different FileInfos but same names
+ {
+ []os.FileInfo{fi1},
+ []os.FileInfo{fi1bis},
+ false,
+ },
+ // Different FileInfos, different names
+ {
+ []os.FileInfo{fi1},
+ []os.FileInfo{fi2},
+ true,
+ },
+ }
+ for _, elt := range cases {
+ err := CompareDirectoryEntries(elt.e1, elt.e2)
+ if elt.shouldError && err == nil {
+ t.Fatalf("Should have return an error, did not with %v and %v", elt.e1, elt.e2)
+ }
+ if !elt.shouldError && err != nil {
+ t.Fatalf("Should have not returned an error, but did : %v with %v and %v", err, elt.e1, elt.e2)
+ }
+ }
+}
+
+// FIXME make an "unhappy path" test for ListTar without "panicking" :-)
+func TestListTar(t *testing.T) {
+ // TODO Windows: Figure out why this fails. Should be portable.
+ if runtime.GOOS == "windows" {
+ t.Skip("Failing on Windows - needs further investigation")
+ }
+ tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpFolder)
+
+ // Let's create a Tar file
+ srcFile := filepath.Join(tmpFolder, "src")
+ tarFile := filepath.Join(tmpFolder, "src.tar")
+ os.Create(srcFile)
+ cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile)
+ _, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ reader, err := os.Open(tarFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer reader.Close()
+
+ entries, err := ListTar(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(entries) != 1 && entries[0] != "src" {
+ t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries)
+ }
+}
+
+func TestRandomTmpDirPath(t *testing.T) {
+ path := RandomTmpDirPath("something", runtime.GOOS)
+
+ prefix := "/tmp/something"
+ if runtime.GOOS == "windows" {
+ prefix = os.Getenv("TEMP") + `\something`
+ }
+ expectedSize := len(prefix) + 11
+
+ if !strings.HasPrefix(path, prefix) {
+ t.Fatalf("Expected generated path to have '%s' as prefix, got %s'", prefix, path)
+ }
+ if len(path) != expectedSize {
+ t.Fatalf("Expected generated path to be %d, got %d", expectedSize, len(path))
+ }
+}
+
+func TestConsumeWithSpeed(t *testing.T) {
+ reader := strings.NewReader("1234567890")
+ chunksize := 2
+
+ bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if bytes1 != 10 {
+ t.Fatalf("Expected to have read 10 bytes, got %d", bytes1)
+ }
+
+}
+
+func TestConsumeWithSpeedWithStop(t *testing.T) {
+ reader := strings.NewReader("1234567890")
+ chunksize := 2
+
+ stopIt := make(chan bool)
+
+ go func() {
+ time.Sleep(1 * time.Millisecond)
+ stopIt <- true
+ }()
+
+ bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if bytes1 != 2 {
+ t.Fatalf("Expected to have read 2 bytes, got %d", bytes1)
+ }
+
+}
+
+func TestParseCgroupPathsEmpty(t *testing.T) {
+ cgroupMap := ParseCgroupPaths("")
+ if len(cgroupMap) != 0 {
+ t.Fatalf("Expected an empty map, got %v", cgroupMap)
+ }
+ cgroupMap = ParseCgroupPaths("\n")
+ if len(cgroupMap) != 0 {
+ t.Fatalf("Expected an empty map, got %v", cgroupMap)
+ }
+ cgroupMap = ParseCgroupPaths("something:else\nagain:here")
+ if len(cgroupMap) != 0 {
+ t.Fatalf("Expected an empty map, got %v", cgroupMap)
+ }
+}
+
+func TestParseCgroupPaths(t *testing.T) {
+ cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b")
+ if len(cgroupMap) != 2 {
+ t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap)
+ }
+ if value, ok := cgroupMap["memory"]; !ok || value != "/a" {
+ t.Fatalf("Expected cgroupMap to contains an entry for 'memory' with value '/a', got %v", cgroupMap)
+ }
+ if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" {
+ t.Fatalf("Expected cgroupMap to contains an entry for 'cpuset' with value '/b', got %v", cgroupMap)
+ }
+}
+
+func TestChannelBufferTimeout(t *testing.T) {
+ expected := "11"
+
+ buf := &ChannelBuffer{make(chan []byte, 1)}
+ defer buf.Close()
+
+ done := make(chan struct{}, 1)
+ go func() {
+ time.Sleep(100 * time.Millisecond)
+ io.Copy(buf, strings.NewReader(expected))
+ done <- struct{}{}
+ }()
+
+ // Wait long enough
+ b := make([]byte, 2)
+ _, err := buf.ReadTimeout(b, 50*time.Millisecond)
+ if err == nil && err.Error() != "timeout reading from channel" {
+ t.Fatalf("Expected an error, got %s", err)
+ }
+ <-done
+}
+
+func TestChannelBuffer(t *testing.T) {
+ expected := "11"
+
+ buf := &ChannelBuffer{make(chan []byte, 1)}
+ defer buf.Close()
+
+ go func() {
+ time.Sleep(100 * time.Millisecond)
+ io.Copy(buf, strings.NewReader(expected))
+ }()
+
+ // Wait long enough
+ b := make([]byte, 2)
+ _, err := buf.ReadTimeout(b, 200*time.Millisecond)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != expected {
+ t.Fatalf("Expected '%s', got '%s'", expected, string(b))
+ }
+}
+
+// FIXME doesn't work
+// func TestRunAtDifferentDate(t *testing.T) {
+// var date string
+
+// 	// Layout for date. YYYYMMDD
+// const timeLayout = "20060102"
+// expectedDate := "20100201"
+// theDate, err := time.Parse(timeLayout, expectedDate)
+// if err != nil {
+// t.Fatal(err)
+// }
+
+// RunAtDifferentDate(theDate, func() {
+// cmd := exec.Command("date", "+%Y%M%d")
+// out, err := cmd.Output()
+// if err != nil {
+// t.Fatal(err)
+// }
+// date = string(out)
+// })
+// }
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
new file mode 100644
index 00000000000..3d737b3e19d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go
@@ -0,0 +1,51 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+)
+
+var errBufferFull = errors.New("buffer is full")
+
+type fixedBuffer struct {
+ buf []byte
+ pos int
+ lastRead int
+}
+
+func (b *fixedBuffer) Write(p []byte) (int, error) {
+ n := copy(b.buf[b.pos:cap(b.buf)], p)
+ b.pos += n
+
+ if n < len(p) {
+ if b.pos == cap(b.buf) {
+ return n, errBufferFull
+ }
+ return n, io.ErrShortWrite
+ }
+ return n, nil
+}
+
+func (b *fixedBuffer) Read(p []byte) (int, error) {
+ n := copy(p, b.buf[b.lastRead:b.pos])
+ b.lastRead += n
+ return n, nil
+}
+
+func (b *fixedBuffer) Len() int {
+ return b.pos - b.lastRead
+}
+
+func (b *fixedBuffer) Cap() int {
+ return cap(b.buf)
+}
+
+func (b *fixedBuffer) Reset() {
+ b.pos = 0
+ b.lastRead = 0
+ b.buf = b.buf[:0]
+}
+
+func (b *fixedBuffer) String() string {
+ return string(b.buf[b.lastRead:b.pos])
+}
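+
+// Life-cycle sketch:
+//
+//	b := &fixedBuffer{buf: make([]byte, 0, 64)}
+//	b.Write([]byte("hello")) // n == 5
+//	p := make([]byte, 5)
+//	b.Read(p)                // p now holds "hello"
+//	b.Reset()                // ready for reuse, e.g. via a sync.Pool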
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go
new file mode 100644
index 00000000000..41098fa6e7c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer_test.go
@@ -0,0 +1,75 @@
+package ioutils
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestFixedBufferWrite(t *testing.T) {
+ buf := &fixedBuffer{buf: make([]byte, 0, 64)}
+ n, err := buf.Write([]byte("hello"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != 5 {
+ t.Fatalf("expected 5 bytes written, got %d", n)
+ }
+
+ if string(buf.buf[:5]) != "hello" {
+ t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5]))
+ }
+
+ n, err = buf.Write(bytes.Repeat([]byte{1}, 64))
+ if err != errBufferFull {
+ t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64])
+ }
+}
+
+func TestFixedBufferRead(t *testing.T) {
+ buf := &fixedBuffer{buf: make([]byte, 0, 64)}
+ if _, err := buf.Write([]byte("hello world")); err != nil {
+ t.Fatal(err)
+ }
+
+ b := make([]byte, 5)
+ n, err := buf.Read(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != 5 {
+ t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String())
+ }
+
+ if string(b) != "hello" {
+ t.Fatalf("expected \"hello\", got %q", string(b))
+ }
+
+ n, err = buf.Read(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != 5 {
+ t.Fatalf("expected 5 bytes read, got %d", n)
+ }
+
+ if string(b) != " worl" {
+ t.Fatalf("expected \" worl\", got %s", string(b))
+ }
+
+ b = b[:1]
+ n, err = buf.Read(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != 1 {
+ t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String())
+ }
+
+ if string(b) != "d" {
+ t.Fatalf("expected \"d\", got %s", string(b))
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
new file mode 100644
index 00000000000..72a04f34919
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
@@ -0,0 +1,186 @@
+package ioutils
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// maxCap is the highest capacity to use in byte slices that buffer data.
+const maxCap = 1e6
+
+// minCap is the lowest capacity to use in byte slices that buffer data
+const minCap = 64
+
+// blockThreshold is the minimum number of bytes in the buffer which will cause
+// a write to BytesPipe to block when allocating a new slice.
+const blockThreshold = 1e6
+
+var (
+ // ErrClosed is returned when Write is called on a closed BytesPipe.
+ ErrClosed = errors.New("write to closed BytesPipe")
+
+ bufPools = make(map[int]*sync.Pool)
+ bufPoolsLock sync.Mutex
+)
+
+// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
+// All written data may be read at most once. Also, BytesPipe allocates
+// and releases new byte slices to adjust to current needs, so the buffer
+// won't stay overgrown after peak loads.
+type BytesPipe struct {
+ mu sync.Mutex
+ wait *sync.Cond
+ buf []*fixedBuffer
+ bufLen int
+ closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new BytesPipe, backed by an initial fixed buffer
+// whose capacity is minCap (64 bytes). Further buffers are allocated on
+// demand as data is written.
+func NewBytesPipe() *BytesPipe {
+ bp := &BytesPipe{}
+ bp.buf = append(bp.buf, getBuffer(minCap))
+ bp.wait = sync.NewCond(&bp.mu)
+ return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in a process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+ bp.mu.Lock()
+
+ written := 0
+loop0:
+ for {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return written, ErrClosed
+ }
+
+ if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(minCap))
+ }
+ // get the last buffer
+ b := bp.buf[len(bp.buf)-1]
+
+ n, err := b.Write(p)
+ written += n
+ bp.bufLen += n
+
+ // errBufferFull is an error we expect to get if the buffer is full
+ if err != nil && err != errBufferFull {
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, err
+ }
+
+ // if there was enough room to write all then break
+ if len(p) == n {
+ break
+ }
+
+ // more data: write to the next slice
+ p = p[n:]
+
+ // make sure the buffer doesn't grow too big from this write
+ for bp.bufLen >= blockThreshold {
+ bp.wait.Wait()
+ if bp.closeErr != nil {
+ continue loop0
+ }
+ }
+
+ // add new byte slice to the buffers slice and continue writing
+ nextCap := b.Cap() * 2
+ if nextCap > maxCap {
+ nextCap = maxCap
+ }
+ bp.buf = append(bp.buf, getBuffer(nextCap))
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+ bp.mu.Lock()
+ if err != nil {
+ bp.closeErr = err
+ } else {
+ bp.closeErr = io.EOF
+ }
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+ return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data can be read only once.
+func (bp *BytesPipe) Read(p []byte) (n int, err error) {
+ bp.mu.Lock()
+ if bp.bufLen == 0 {
+ if bp.closeErr != nil {
+ bp.mu.Unlock()
+ return 0, bp.closeErr
+ }
+ bp.wait.Wait()
+ if bp.bufLen == 0 && bp.closeErr != nil {
+ err := bp.closeErr
+ bp.mu.Unlock()
+ return 0, err
+ }
+ }
+
+ for bp.bufLen > 0 {
+ b := bp.buf[0]
+ read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
+ n += read
+ bp.bufLen -= read
+
+ if b.Len() == 0 {
+ // it's empty so return it to the pool and move to the next one
+ returnBuffer(b)
+ bp.buf[0] = nil
+ bp.buf = bp.buf[1:]
+ }
+
+ if len(p) == read {
+ break
+ }
+
+ p = p[read:]
+ }
+
+ bp.wait.Broadcast()
+ bp.mu.Unlock()
+ return
+}
+
+func returnBuffer(b *fixedBuffer) {
+ b.Reset()
+ bufPoolsLock.Lock()
+ pool := bufPools[b.Cap()]
+ bufPoolsLock.Unlock()
+ if pool != nil {
+ pool.Put(b)
+ }
+}
+
+func getBuffer(size int) *fixedBuffer {
+ bufPoolsLock.Lock()
+ pool, ok := bufPools[size]
+ if !ok {
+ pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
+ bufPools[size] = pool
+ }
+ bufPoolsLock.Unlock()
+ return pool.Get().(*fixedBuffer)
+}
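+
+// Note that buffers are pooled per capacity: bufPools is keyed by size, so a
+// released buffer is only handed back out for requests of exactly the same
+// capacity (the write path doubles sizes from minCap up to maxCap).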
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go
new file mode 100644
index 00000000000..300fb5f6d52
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe_test.go
@@ -0,0 +1,159 @@
+package ioutils
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "math/rand"
+ "testing"
+ "time"
+)
+
+func TestBytesPipeRead(t *testing.T) {
+ buf := NewBytesPipe()
+ buf.Write([]byte("12"))
+ buf.Write([]byte("34"))
+ buf.Write([]byte("56"))
+ buf.Write([]byte("78"))
+ buf.Write([]byte("90"))
+ rd := make([]byte, 4)
+ n, err := buf.Read(rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 4 {
+ t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
+ }
+ if string(rd) != "1234" {
+ t.Fatalf("Read %s, but must be %s", rd, "1234")
+ }
+ n, err = buf.Read(rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 4 {
+ t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4)
+ }
+ if string(rd) != "5678" {
+ t.Fatalf("Read %s, but must be %s", rd, "5679")
+ }
+ n, err = buf.Read(rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 2 {
+ t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2)
+ }
+ if string(rd[:n]) != "90" {
+ t.Fatalf("Read %s, but must be %s", rd, "90")
+ }
+}
+
+func TestBytesPipeWrite(t *testing.T) {
+ buf := NewBytesPipe()
+ buf.Write([]byte("12"))
+ buf.Write([]byte("34"))
+ buf.Write([]byte("56"))
+ buf.Write([]byte("78"))
+ buf.Write([]byte("90"))
+ if buf.buf[0].String() != "1234567890" {
+ t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890")
+ }
+}
+
+// Write and read at different speeds/chunk sizes and check that valid data is read.
+func TestBytesPipeWriteRandomChunks(t *testing.T) {
+ cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{
+ {100, 10, 1},
+ {1000, 10, 5},
+ {1000, 100, 0},
+ {1000, 5, 6},
+ {10000, 50, 25},
+ }
+
+ testMessage := []byte("this is a random string for testing")
+ // random slice sizes to read and write
+ writeChunks := []int{25, 35, 15, 20}
+ readChunks := []int{5, 45, 20, 25}
+
+ for _, c := range cases {
+ // first pass: write directly to hash
+ hash := sha1.New()
+ for i := 0; i < c.iterations*c.writesPerLoop; i++ {
+ if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil {
+ t.Fatal(err)
+ }
+ }
+ expected := hex.EncodeToString(hash.Sum(nil))
+
+ // write/read through buffer
+ buf := NewBytesPipe()
+ hash.Reset()
+
+ done := make(chan struct{})
+
+ go func() {
+ // random delay before read starts
+ <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond)
+ for i := 0; ; i++ {
+ p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)])
+ n, _ := buf.Read(p)
+ if n == 0 {
+ break
+ }
+ hash.Write(p[:n])
+ }
+
+ close(done)
+ }()
+
+ for i := 0; i < c.iterations; i++ {
+ for w := 0; w < c.writesPerLoop; w++ {
+ buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]])
+ }
+ }
+ buf.Close()
+ <-done
+
+ actual := hex.EncodeToString(hash.Sum(nil))
+
+ if expected != actual {
+ t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual)
+ }
+
+ }
+}
+
+func BenchmarkBytesPipeWrite(b *testing.B) {
+ testData := []byte("pretty short line, because why not?")
+ for i := 0; i < b.N; i++ {
+ readBuf := make([]byte, 1024)
+ buf := NewBytesPipe()
+ go func() {
+ var err error
+ for err == nil {
+ _, err = buf.Read(readBuf)
+ }
+ }()
+ for j := 0; j < 1000; j++ {
+ buf.Write(testData)
+ }
+ buf.Close()
+ }
+}
+
+func BenchmarkBytesPipeRead(b *testing.B) {
+ rd := make([]byte, 512)
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ buf := NewBytesPipe()
+ for j := 0; j < 500; j++ {
+ buf.Write(make([]byte, 1024))
+ }
+ b.StartTimer()
+ for j := 0; j < 1000; j++ {
+ if n, _ := buf.Read(rd); n != 512 {
+ b.Fatalf("Wrong number of bytes: %d", n)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
similarity index 57%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
rename to vendor/github.com/docker/docker/pkg/ioutils/fmt.go
index 801132ff3d3..0b04b0ba3e6 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt.go
@@ -12,3 +12,11 @@ func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
}
return 0, nil
}
+
+// FprintfIfTrue prints the boolean value if it's true
+func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
+ if ok {
+ return fmt.Fprintf(w, format, ok)
+ }
+ return 0, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go
new file mode 100644
index 00000000000..8968863296d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fmt_test.go
@@ -0,0 +1,17 @@
+package ioutils
+
+import "testing"
+
+func TestFprintfIfNotEmpty(t *testing.T) {
+ wc := NewWriteCounter(&NopWriter{})
+ n, _ := FprintfIfNotEmpty(wc, "foo%s", "")
+
+ if wc.Count != 0 || n != 0 {
+ t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n)
+ }
+
+ n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar")
+ if wc.Count != 6 || n != 6 {
+ t.Errorf("Wrong count: %v vs. %v vs. 6", wc.Count, n)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
new file mode 100644
index 00000000000..6dc50a03dc0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
@@ -0,0 +1,82 @@
+package ioutils
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
+// temporary file, and closing it atomically renames the temporary file to the
+// destination path. Writing and closing concurrently is not allowed.
+func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
+ f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+ if err != nil {
+ return nil, err
+ }
+
+ abspath, err := filepath.Abs(filename)
+ if err != nil {
+ return nil, err
+ }
+ return &atomicFileWriter{
+ f: f,
+ fn: abspath,
+ perm: perm,
+ }, nil
+}
+
+// AtomicWriteFile atomically writes data to a file named by filename.
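+// A minimal usage sketch (the path, data, and mode here are illustrative):
+//
+//	data := []byte(`{"key":"value"}`)
+//	if err := AtomicWriteFile("/tmp/config.json", data, 0600); err != nil {
+//		log.Fatal(err) // on failure the destination file is left untouched
+//	}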
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := NewAtomicFileWriter(filename, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ f.(*atomicFileWriter).writeErr = err
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+type atomicFileWriter struct {
+ f *os.File
+ fn string
+ writeErr error
+ perm os.FileMode
+}
+
+func (w *atomicFileWriter) Write(dt []byte) (int, error) {
+ n, err := w.f.Write(dt)
+ if err != nil {
+ w.writeErr = err
+ }
+ return n, err
+}
+
+func (w *atomicFileWriter) Close() (retErr error) {
+ defer func() {
+ if retErr != nil || w.writeErr != nil {
+ os.Remove(w.f.Name())
+ }
+ }()
+ if err := w.f.Sync(); err != nil {
+ w.f.Close()
+ return err
+ }
+ if err := w.f.Close(); err != nil {
+ return err
+ }
+ if err := os.Chmod(w.f.Name(), w.perm); err != nil {
+ return err
+ }
+ if w.writeErr == nil {
+ return os.Rename(w.f.Name(), w.fn)
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go
new file mode 100644
index 00000000000..470ca1a6f4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters_test.go
@@ -0,0 +1,39 @@
+package ioutils
+
+import (
+ "bytes"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestAtomicWriteToFile(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "atomic-writers-test")
+ if err != nil {
+ t.Fatalf("Error when creating temporary directory: %s", err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ expected := []byte("barbaz")
+ if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, 0666); err != nil {
+ t.Fatalf("Error writing to file: %v", err)
+ }
+
+ actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo"))
+ if err != nil {
+ t.Fatalf("Error reading from file: %v", err)
+ }
+
+	if !bytes.Equal(actual, expected) {
+ t.Fatalf("Data mismatch, expected %q, got %q", expected, actual)
+ }
+
+ st, err := os.Stat(filepath.Join(tmpDir, "foo"))
+ if err != nil {
+ t.Fatalf("Error statting file: %v", err)
+ }
+ if expected := os.FileMode(0666); st.Mode() != expected {
+ t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode())
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
similarity index 99%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go
rename to vendor/github.com/docker/docker/pkg/ioutils/multireader.go
index f231aa9daf5..0d2d76b4797 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader.go
@@ -53,7 +53,7 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
}
if rdrOffset == s && i != len(r.readers)-1 {
- idx += 1
+ idx++
rdrOffset = 0
}
r.pos = &pos{idx, rdrOffset}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go
new file mode 100644
index 00000000000..de495b56da4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/multireader_test.go
@@ -0,0 +1,149 @@
+package ioutils
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestMultiReadSeekerReadAll(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ expectedSize := int64(s1.Len() + s2.Len() + s3.Len())
+
+ b, err := ioutil.ReadAll(mr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expected := "hello world 1hello world 2hello world 3"
+ if string(b) != expected {
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
+ }
+
+ size, err := mr.Seek(0, os.SEEK_END)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if size != expectedSize {
+ t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize)
+ }
+
+ // Reset the position and read again
+ pos, err := mr.Seek(0, os.SEEK_SET)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pos != 0 {
+ t.Fatalf("expected position to be set to 0, got %d", pos)
+ }
+
+ b, err = ioutil.ReadAll(mr)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if string(b) != expected {
+ t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected)
+ }
+}
+
+func TestMultiReadSeekerReadEach(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ var totalBytes int64
+ for i, s := range []*strings.Reader{s1, s2, s3} {
+ sLen := int64(s.Len())
+ buf := make([]byte, s.Len())
+ expected := []byte(fmt.Sprintf("%s %d", str, i+1))
+
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(buf, expected) {
+ t.Fatalf("expected %q to be %q", string(buf), string(expected))
+ }
+
+ pos, err := mr.Seek(0, os.SEEK_CUR)
+ if err != nil {
+ t.Fatalf("iteration: %d, error: %v", i+1, err)
+ }
+
+ // check that the total bytes read is the current position of the seeker
+ totalBytes += sLen
+ if pos != totalBytes {
+ t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1)
+ }
+
+ // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well
+ newPos, err := mr.Seek(pos, os.SEEK_SET)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if newPos != pos {
+ t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos)
+ }
+ }
+}
+
+func TestMultiReadSeekerReadSpanningChunks(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ buf := make([]byte, s1.Len()+3)
+ _, err := mr.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+	// expected is the contents of s1 + 3 bytes from s2, i.e., the `hel` at the end of this string
+ expected := "hello world 1hel"
+ if string(buf) != expected {
+ t.Fatalf("expected %s to be %s", string(buf), expected)
+ }
+}
+
+func TestMultiReadSeekerNegativeSeek(t *testing.T) {
+ str := "hello world"
+ s1 := strings.NewReader(str + " 1")
+ s2 := strings.NewReader(str + " 2")
+ s3 := strings.NewReader(str + " 3")
+ mr := MultiReadSeeker(s1, s2, s3)
+
+ s1Len := s1.Len()
+ s2Len := s2.Len()
+ s3Len := s3.Len()
+
+ s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if s != int64(s1Len+s2Len) {
+ t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len())
+ }
+
+ buf := make([]byte, s3Len)
+ if _, err := mr.Read(buf); err != nil && err != io.EOF {
+ t.Fatal(err)
+ }
+ expected := fmt.Sprintf("%s %d", str, 3)
+	if string(buf) != expected {
+ t.Fatalf("expected %q to be %q", string(buf), expected)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
new file mode 100644
index 00000000000..63f3c07f463
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -0,0 +1,154 @@
+package ioutils
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "io"
+
+ "golang.org/x/net/context"
+)
+
+type readCloserWrapper struct {
+ io.Reader
+ closer func() error
+}
+
+func (r *readCloserWrapper) Close() error {
+ return r.closer()
+}
+
+// NewReadCloserWrapper returns a new io.ReadCloser.
+func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
+ return &readCloserWrapper{
+ Reader: r,
+ closer: closer,
+ }
+}
+
+type readerErrWrapper struct {
+ reader io.Reader
+ closer func()
+}
+
+func (r *readerErrWrapper) Read(p []byte) (int, error) {
+ n, err := r.reader.Read(p)
+ if err != nil {
+ r.closer()
+ }
+ return n, err
+}
+
+// NewReaderErrWrapper returns a new io.Reader.
+func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
+ return &readerErrWrapper{
+ reader: r,
+ closer: closer,
+ }
+}
+
+// HashData returns the sha256 sum of src.
+func HashData(src io.Reader) (string, error) {
+ h := sha256.New()
+ if _, err := io.Copy(h, src); err != nil {
+ return "", err
+ }
+ return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a function;
+// the function runs when the reader reaches end of file or when it is closed.
+type OnEOFReader struct {
+ Rc io.ReadCloser
+ Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.Rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+ err := r.Rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *OnEOFReader) runFunc() {
+ if fn := r.Fn; fn != nil {
+ fn()
+ r.Fn = nil
+ }
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+ cancel func()
+ pR *io.PipeReader // Stream to read from
+ pW *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
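+//
+// A usage sketch (illustrative; rc stands in for any io.ReadCloser, such as a
+// response body):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	crc := NewCancelReadCloser(ctx, rc)
+//	defer crc.Close()
+//	_, err := ioutil.ReadAll(crc) // returns ctx.Err() if the timeout fires first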
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+ pR, pW := io.Pipe()
+
+ // Create a context used to signal when the pipe is closed
+ doneCtx, cancel := context.WithCancel(context.Background())
+
+ p := &cancelReadCloser{
+ cancel: cancel,
+ pR: pR,
+ pW: pW,
+ }
+
+ go func() {
+ _, err := io.Copy(pW, in)
+ select {
+ case <-ctx.Done():
+ // If the context was closed, p.closeWithError
+ // was already called. Calling it again would
+ // change the error that Read returns.
+ default:
+ p.closeWithError(err)
+ }
+ in.Close()
+ }()
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ p.closeWithError(ctx.Err())
+ case <-doneCtx.Done():
+ return
+ }
+ }
+ }()
+
+ return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+ return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+ p.pW.CloseWithError(err)
+ p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+ p.closeWithError(io.EOF)
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go
new file mode 100644
index 00000000000..9abc1054df3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers_test.go
@@ -0,0 +1,94 @@
+package ioutils
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Implement io.Reader
+type errorReader struct{}
+
+func (r *errorReader) Read(p []byte) (int, error) {
+	return 0, fmt.Errorf("Error reader always fails.")
+}
+
+func TestReadCloserWrapperClose(t *testing.T) {
+ reader := strings.NewReader("A string reader")
+ wrapper := NewReadCloserWrapper(reader, func() error {
+ return fmt.Errorf("This will be called when closing")
+ })
+ err := wrapper.Close()
+ if err == nil || !strings.Contains(err.Error(), "This will be called when closing") {
+		t.Fatalf("readCloserWrapper should have called the anonymous func and thus failed.")
+ }
+}
+
+func TestReaderErrWrapperReadOnError(t *testing.T) {
+ called := false
+ reader := &errorReader{}
+ wrapper := NewReaderErrWrapper(reader, func() {
+ called = true
+ })
+ _, err := wrapper.Read([]byte{})
+	if err == nil || !strings.Contains(err.Error(), "Error reader always fails.") {
+		t.Fatalf("readErrWrapper should have returned an error")
+ }
+ if !called {
+		t.Fatalf("readErrWrapper should have called the anonymous function on failure")
+ }
+}
+
+func TestReaderErrWrapperRead(t *testing.T) {
+ reader := strings.NewReader("a string reader.")
+ wrapper := NewReaderErrWrapper(reader, func() {
+ t.Fatalf("readErrWrapper should not have called the anonymous function")
+ })
+	// Read 20 bytes (should be ok with the string above)
+ num, err := wrapper.Read(make([]byte, 20))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if num != 16 {
+		t.Fatalf("readerErrWrapper should have read 16 bytes, but read %d", num)
+ }
+}
+
+func TestHashData(t *testing.T) {
+ reader := strings.NewReader("hash-me")
+ actual, err := HashData(reader)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa"
+ if actual != expected {
+ t.Fatalf("Expecting %s, got %s", expected, actual)
+ }
+}
+
+type perpetualReader struct{}
+
+func (p *perpetualReader) Read(buf []byte) (n int, err error) {
+ for i := 0; i != len(buf); i++ {
+ buf[i] = 'a'
+ }
+ return len(buf), nil
+}
+
+func TestCancelReadCloser(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
+	defer cancel()
+ cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{}))
+ for {
+ var buf [128]byte
+ _, err := cancelReadCloser.Read(buf[:])
+ if err == context.DeadlineExceeded {
+ break
+ } else if err != nil {
+ t.Fatalf("got unexpected error: %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 00000000000..1539ad21b57
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils
+
+import "io/ioutil"
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.
+func TempDir(dir, prefix string) (string, error) {
+ return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 00000000000..c258e5fdd87
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,18 @@
+// +build windows
+
+package ioutils
+
+import (
+ "io/ioutil"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+ tempDir, err := ioutil.TempDir(dir, prefix)
+ if err != nil {
+ return "", err
+ }
+ return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 00000000000..52a4901adeb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils
+
+import (
+ "io"
+ "sync"
+)
+
+// WriteFlusher wraps the Write and Flush operations, ensuring that every write
+// is followed by a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+ w io.Writer
+ flusher flusher
+ flushed chan struct{}
+ flushedOnce sync.Once
+ closed chan struct{}
+ closeLock sync.Mutex
+}
+
+type flusher interface {
+ Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+ select {
+ case <-wf.closed:
+ return 0, errWriteFlusherClosed
+ default:
+ }
+
+ n, err = wf.w.Write(b)
+ wf.Flush() // every write is a flush.
+ return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+ select {
+ case <-wf.closed:
+ return
+ default:
+ }
+
+ wf.flushedOnce.Do(func() {
+ close(wf.flushed)
+ })
+ wf.flusher.Flush()
+}
+
+// Flushed reports whether the stream has been flushed.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. It seems
+	// to be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+ var flushed bool
+ select {
+ case <-wf.flushed:
+ flushed = true
+ default:
+ }
+ return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+ wf.closeLock.Lock()
+ defer wf.closeLock.Unlock()
+
+ select {
+ case <-wf.closed:
+ return errWriteFlusherClosed
+ default:
+ close(wf.closed)
+ }
+ return nil
+}
+
+// NewWriteFlusher returns a new WriteFlusher.
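+//
+// A usage sketch (illustrative; assumes w also implements Flush, as most
+// http.ResponseWriter implementations do):
+//
+//	wf := NewWriteFlusher(w)
+//	defer wf.Close()
+//	wf.Write([]byte("progress...\n")) // written and flushed in one call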
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+ var fl flusher
+ if f, ok := w.(flusher); ok {
+ fl = f
+ } else {
+ fl = &NopFlusher{}
+ }
+ return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
similarity index 74%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go
rename to vendor/github.com/docker/docker/pkg/ioutils/writers.go
index 43fdc44ea96..ccc7f9c23e0 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -2,6 +2,7 @@ package ioutils
import "io"
+// NopWriter represents a type whose Write operation is a no-op.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
@@ -14,12 +15,15 @@ type nopWriteCloser struct {
func (w *nopWriteCloser) Close() error { return nil }
+// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
+// NopFlusher represents a type whose Flush operation is a no-op.
type NopFlusher struct{}
+// Flush is a no-op.
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
@@ -31,6 +35,7 @@ func (r *writeCloserWrapper) Close() error {
return r.closer()
}
+// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
@@ -38,7 +43,7 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
}
}
-// Wrap a concrete io.Writer and hold a count of the number
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when write return is masked
// (e.g., json.Encoder.Encode())
@@ -47,6 +52,7 @@ type WriteCounter struct {
Writer io.Writer
}
+// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go
new file mode 100644
index 00000000000..564b1cd4f5f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers_test.go
@@ -0,0 +1,65 @@
+package ioutils
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+)
+
+func TestWriteCloserWrapperClose(t *testing.T) {
+ called := false
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NewWriteCloserWrapper(writer, func() error {
+ called = true
+ return nil
+ })
+ if err := wrapper.Close(); err != nil {
+ t.Fatal(err)
+ }
+ if !called {
+		t.Fatalf("writeCloserWrapper should have called the anonymous function.")
+ }
+}
+
+func TestNopWriteCloser(t *testing.T) {
+ writer := bytes.NewBuffer([]byte{})
+ wrapper := NopWriteCloser(writer)
+ if err := wrapper.Close(); err != nil {
+		t.Fatal("NopWriteCloser should always return nil on Close.")
+ }
+}
+
+func TestNopWriter(t *testing.T) {
+ nw := &NopWriter{}
+ l, err := nw.Write([]byte{'c'})
+ if err != nil {
+ t.Fatal(err)
+ }
+ if l != 1 {
+		t.Fatalf("Expected 1, got %d", l)
+ }
+}
+
+func TestWriteCounter(t *testing.T) {
+ dummy1 := "This is a dummy string."
+ dummy2 := "This is another dummy string."
+ totalLength := int64(len(dummy1) + len(dummy2))
+
+ reader1 := strings.NewReader(dummy1)
+ reader2 := strings.NewReader(dummy2)
+
+ var buffer bytes.Buffer
+ wc := NewWriteCounter(&buffer)
+
+ reader1.WriteTo(wc)
+ reader2.WriteTo(wc)
+
+ if wc.Count != totalLength {
+ t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)
+ }
+
+ if buffer.String() != dummy1+dummy2 {
+ t.Error("Wrong message written")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go
new file mode 100644
index 00000000000..4734c311196
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go
@@ -0,0 +1,42 @@
+package jsonlog
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// JSONLog represents a log message, typically a single entry from a given log stream.
+// JSONLogs can be easily serialized to and from JSON and support custom formatting.
+type JSONLog struct {
+ // Log is the log message
+ Log string `json:"log,omitempty"`
+ // Stream is the log source
+ Stream string `json:"stream,omitempty"`
+ // Created is the created timestamp of log
+ Created time.Time `json:"time"`
+ // Attrs is the list of extra attributes provided by the user
+ Attrs map[string]string `json:"attrs,omitempty"`
+}
+
+// Format returns the log formatted according to format.
+// If format is "", it returns the log message.
+// If format is "json", it returns the log marshaled in JSON format.
+// By default, it returns the log with its time formatted according to format.
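+//
+// A usage sketch (illustrative values):
+//
+//	jl := &JSONLog{Log: "hello\n", Created: time.Now()}
+//	s, _ := jl.Format("")          // "hello\n"
+//	s, _ = jl.Format("json")       // `{"log":"hello\n","time":"..."}`
+//	s, _ = jl.Format(time.RFC3339) // e.g. "2015-05-29T11:01:02Z hello\n"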
+func (jl *JSONLog) Format(format string) (string, error) {
+ if format == "" {
+ return jl.Log, nil
+ }
+ if format == "json" {
+ m, err := json.Marshal(jl)
+ return string(m), err
+ }
+ return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil
+}
+
+// Reset resets the log to nil.
+func (jl *JSONLog) Reset() {
+ jl.Log = ""
+ jl.Stream = ""
+ jl.Created = time.Time{}
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
new file mode 100644
index 00000000000..83ce684a8ef
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go
@@ -0,0 +1,178 @@
+// This code was initially generated by ffjson
+// This code was generated via the following steps:
+// $ go get -u github.com/pquerna/ffjson
+// $ make BIND_DIR=. shell
+// $ ffjson pkg/jsonlog/jsonlog.go
+// $ mv pkg/jsonlog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
+//
+// It has been modified to improve the performance of time marshalling to JSON
+// and to clean it up.
+// Should this code need to be regenerated when the JSONLog struct is changed,
+// the relevant changes which have been made are:
+// import (
+// "bytes"
+//-
+// "unicode/utf8"
+// )
+//
+// func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+// }
+// return buf.Bytes(), nil
+// }
+//+
+// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+//- var err error
+//- var obj []byte
+//- var first bool = true
+//- _ = obj
+//- _ = err
+//- _ = first
+//+ var (
+//+ err error
+//+ timestamp string
+//+ first bool = true
+//+ )
+// buf.WriteString(`{`)
+// if len(mj.Log) != 0 {
+// if first == true {
+//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+// buf.WriteString(`,`)
+// }
+// buf.WriteString(`"time":`)
+//- obj, err = mj.Created.MarshalJSON()
+//+ timestamp, err = FastTimeMarshalJSON(mj.Created)
+// if err != nil {
+// return err
+// }
+//- buf.Write(obj)
+//+ buf.WriteString(timestamp)
+// buf.WriteString(`}`)
+// return nil
+// }
+// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+// if len(mj.Log) != 0 {
+// - if first == true {
+// - first = false
+// - } else {
+// - buf.WriteString(`,`)
+// - }
+// + first = false
+// buf.WriteString(`"log":`)
+// ffjsonWriteJSONString(buf, mj.Log)
+// }
+
+package jsonlog
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+// MarshalJSON marshals the JSONLog.
+func (mj *JSONLog) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ buf.Grow(1024)
+ if err := mj.MarshalJSONBuf(&buf); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer.
+func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
+ var (
+ err error
+ timestamp string
+ first = true
+ )
+ buf.WriteString(`{`)
+ if len(mj.Log) != 0 {
+ first = false
+ buf.WriteString(`"log":`)
+ ffjsonWriteJSONString(buf, mj.Log)
+ }
+ if len(mj.Stream) != 0 {
+ if first {
+ first = false
+ } else {
+ buf.WriteString(`,`)
+ }
+ buf.WriteString(`"stream":`)
+ ffjsonWriteJSONString(buf, mj.Stream)
+ }
+ if !first {
+ buf.WriteString(`,`)
+ }
+ buf.WriteString(`"time":`)
+ timestamp, err = FastTimeMarshalJSON(mj.Created)
+ if err != nil {
+ return err
+ }
+ buf.WriteString(timestamp)
+ buf.WriteString(`}`)
+ return nil
+}
+
+func ffjsonWriteJSONString(buf *bytes.Buffer, s string) {
+ const hex = "0123456789abcdef"
+
+ buf.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ buf.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ buf.WriteByte('\\')
+ buf.WriteByte(b)
+ case '\n':
+ buf.WriteByte('\\')
+ buf.WriteByte('n')
+ case '\r':
+ buf.WriteByte('\\')
+ buf.WriteByte('r')
+		default:
+			buf.WriteString(`\u00`)
+ buf.WriteByte(hex[b>>4])
+ buf.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ buf.WriteString(s[start:i])
+ }
+ buf.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ buf.WriteString(s[start:i])
+ }
+ buf.WriteString(`\u202`)
+ buf.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ buf.WriteString(s[start:])
+ }
+ buf.WriteByte('"')
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go
new file mode 100644
index 00000000000..3edb2714109
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling_test.go
@@ -0,0 +1,34 @@
+package jsonlog
+
+import (
+ "regexp"
+ "testing"
+)
+
+func TestJSONLogMarshalJSON(t *testing.T) {
+ logs := map[*JSONLog]string{
+ &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`,
+ &JSONLog{}: `^{\"time\":\".{20,}\"}$`,
+ // These ones are a little weird
+ &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`,
+ &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`,
+ }
+ for jsonLog, expression := range logs {
+ data, err := jsonLog.MarshalJSON()
+ if err != nil {
+ t.Fatal(err)
+ }
+ res := string(data)
+ t.Logf("Result of WriteLog: %q", res)
+ logRe := regexp.MustCompile(expression)
+ if !logRe.MatchString(res) {
+ t.Fatalf("Log line not in expected format [%v]: %q", expression, res)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
new file mode 100644
index 00000000000..df522c0d66f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go
@@ -0,0 +1,122 @@
+package jsonlog
+
+import (
+ "bytes"
+ "encoding/json"
+ "unicode/utf8"
+)
+
+// JSONLogs is based on JSONLog.
+// It allows marshalling a JSONLog whose Log field is already a []byte
+// and whose Created timestamp is already marshalled.
+type JSONLogs struct {
+ Log []byte `json:"log,omitempty"`
+ Stream string `json:"stream,omitempty"`
+ Created string `json:"time"`
+
+ // json-encoded bytes
+ RawAttrs json.RawMessage `json:"attrs,omitempty"`
+}
+
+// MarshalJSONBuf is based on the same method from JSONLog.
+// It has been modified to account for the different field types of JSONLogs.
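+//
+// A usage sketch (illustrative; Created must already be marshalled, quotes included):
+//
+//	var buf bytes.Buffer
+//	jl := &JSONLogs{Log: []byte("hello\n"), Created: `"2015-05-29T11:01:02Z"`}
+//	_ = jl.MarshalJSONBuf(&buf)
+//	// buf now holds {"log":"hello\n","time":"2015-05-29T11:01:02Z"}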
+func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
+ var first = true
+
+ buf.WriteString(`{`)
+ if len(mj.Log) != 0 {
+ first = false
+ buf.WriteString(`"log":`)
+ ffjsonWriteJSONBytesAsString(buf, mj.Log)
+ }
+ if len(mj.Stream) != 0 {
+		if first {
+ first = false
+ } else {
+ buf.WriteString(`,`)
+ }
+ buf.WriteString(`"stream":`)
+ ffjsonWriteJSONString(buf, mj.Stream)
+ }
+ if len(mj.RawAttrs) > 0 {
+ if first {
+ first = false
+ } else {
+ buf.WriteString(`,`)
+ }
+ buf.WriteString(`"attrs":`)
+ buf.Write(mj.RawAttrs)
+ }
+ if !first {
+ buf.WriteString(`,`)
+ }
+ buf.WriteString(`"time":`)
+ buf.WriteString(mj.Created)
+ buf.WriteString(`}`)
+ return nil
+}
+
+// ffjsonWriteJSONBytesAsString is based on ffjsonWriteJSONString. It has been
+// changed to accept a string passed as a slice of bytes.
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
+ const hex = "0123456789abcdef"
+
+ buf.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ buf.WriteByte('\\')
+ buf.WriteByte(b)
+ case '\n':
+ buf.WriteByte('\\')
+ buf.WriteByte('n')
+ case '\r':
+ buf.WriteByte('\\')
+ buf.WriteByte('r')
+		default:
+			buf.WriteString(`\u00`)
+ buf.WriteByte(hex[b>>4])
+ buf.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ buf.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ buf.Write(s[start:i])
+ }
+ buf.WriteString(`\u202`)
+ buf.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ buf.Write(s[start:])
+ }
+ buf.WriteByte('"')
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go
new file mode 100644
index 00000000000..6d6ad21583e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes_test.go
@@ -0,0 +1,39 @@
+package jsonlog
+
+import (
+ "bytes"
+ "regexp"
+ "testing"
+)
+
+func TestJSONLogsMarshalJSONBuf(t *testing.T) {
+ logs := map[*JSONLogs]string{
+ &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`,
+ &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`,
+ &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`,
+ &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`,
+ &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`,
+ &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`,
+ &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`,
+ &JSONLogs{Created: "time"}: `^{\"time\":time}$`,
+ &JSONLogs{}: `^{\"time\":}$`,
+ // These ones are a little weird
+ &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`,
+ &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`,
+ &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`,
+ // with raw attributes
+ &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`,
+ }
+ for jsonLog, expression := range logs {
+ var buf bytes.Buffer
+ if err := jsonLog.MarshalJSONBuf(&buf); err != nil {
+ t.Fatal(err)
+ }
+ res := buf.String()
+ t.Logf("Result of WriteLog: %q", res)
+ logRe := regexp.MustCompile(expression)
+ if !logRe.MatchString(res) {
+ t.Fatalf("Log line not in expected format [%v]: %q", expression, res)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
new file mode 100644
index 00000000000..21173381495
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go
@@ -0,0 +1,27 @@
+// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON.
+package jsonlog
+
+import (
+ "errors"
+ "time"
+)
+
+const (
+	// RFC3339NanoFixed is our own version of time.RFC3339Nano because we want
+	// one that pads the nanoseconds part with zeros to ensure
+	// the timestamps are aligned in the logs.
+ RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// JSONFormat is the format used by FastTimeMarshalJSON
+ JSONFormat = `"` + time.RFC3339Nano + `"`
+)
+
+// FastTimeMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON makes.
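+//
+// A usage sketch (illustrative):
+//
+//	s, err := FastTimeMarshalJSON(time.Now())
+//	// on success s is a quoted RFC3339Nano timestamp,
+//	// e.g. `"2015-05-29T11:01:02.000000003Z"`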
+func FastTimeMarshalJSON(t time.Time) (string, error) {
+ if y := t.Year(); y < 0 || y >= 10000 {
+ // RFC 3339 is clear that years are 4 digits exactly.
+ // See golang.org/issue/4556#c15 for more discussion.
+ return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+ }
+ return t.Format(JSONFormat), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go
new file mode 100644
index 00000000000..02d0302c4a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling_test.go
@@ -0,0 +1,47 @@
+package jsonlog
+
+import (
+ "testing"
+ "time"
+)
+
+// Testing to ensure the 'year' field is between 0 and 9999
+func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) {
+ aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local)
+ json, err := FastTimeMarshalJSON(aTime)
+ if err == nil {
+		t.Fatalf("FastTimeMarshalJSON should return an error, but got '%v'", json)
+ }
+ anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local)
+ json, err = FastTimeMarshalJSON(anotherTime)
+ if err == nil {
+		t.Fatalf("FastTimeMarshalJSON should return an error, but got '%v'", json)
+ }
+}
+
+func TestFastTimeMarshalJSON(t *testing.T) {
+ aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC)
+ json, err := FastTimeMarshalJSON(aTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := "\"2015-05-29T11:01:02.000000003Z\""
+ if json != expected {
+ t.Fatalf("Expected %v, got %v", expected, json)
+ }
+
+ location, err := time.LoadLocation("Europe/Paris")
+ if err != nil {
+ t.Fatal(err)
+ }
+ aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location)
+ json, err = FastTimeMarshalJSON(aTime)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected = "\"2015-05-29T11:01:02.000000003+02:00\""
+ if json != expected {
+ t.Fatalf("Expected %v, got %v", expected, json)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 00000000000..91b073b731a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,221 @@
+package jsonmessage
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/docker/pkg/term"
+ "github.com/docker/go-units"
+)
+
+// JSONError wraps a concrete Code and Message. `Code` is
+// an integer error code, `Message` is the error message.
+type JSONError struct {
+ Code int `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+ return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
+type JSONProgress struct {
+ terminalFd uintptr
+ Current int64 `json:"current,omitempty"`
+ Total int64 `json:"total,omitempty"`
+ Start int64 `json:"start,omitempty"`
+}
+
+func (p *JSONProgress) String() string {
+ var (
+ width = 200
+ pbBox string
+ numbersBox string
+ timeLeftBox string
+ )
+
+ ws, err := term.GetWinsize(p.terminalFd)
+ if err == nil {
+ width = int(ws.Width)
+ }
+
+ if p.Current <= 0 && p.Total <= 0 {
+ return ""
+ }
+ current := units.HumanSize(float64(p.Current))
+ if p.Total <= 0 {
+ return fmt.Sprintf("%8v", current)
+ }
+ total := units.HumanSize(float64(p.Total))
+ percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+ if percentage > 50 {
+ percentage = 50
+ }
+ if width > 110 {
+ // this number can't be negative gh#7136
+ numSpaces := 0
+ if 50-percentage > 0 {
+ numSpaces = 50 - percentage
+ }
+ pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+ }
+
+ numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+
+ if p.Current > 0 && p.Start > 0 && percentage < 50 {
+ fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
+ perEntry := fromStart / time.Duration(p.Current)
+ left := time.Duration(p.Total-p.Current) * perEntry
+ left = (left / time.Second) * time.Second
+
+ if width > 50 {
+ timeLeftBox = " " + left.String()
+ }
+ }
+ return pbBox + numbersBox + timeLeftBox
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it comes from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+ Stream string `json:"stream,omitempty"`
+ Status string `json:"status,omitempty"`
+ Progress *JSONProgress `json:"progressDetail,omitempty"`
+ ProgressMessage string `json:"progress,omitempty"` //deprecated
+ ID string `json:"id,omitempty"`
+ From string `json:"from,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
+ Error *JSONError `json:"errorDetail,omitempty"`
+ ErrorMessage string `json:"error,omitempty"` //deprecated
+ // Aux contains out-of-band data, such as digests for push signing.
+ Aux *json.RawMessage `json:"aux,omitempty"`
+}
+
+// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
+// is a terminal. If this is the case, it will erase the entire current line
+// when displaying the progressbar.
+func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
+ if jm.Error != nil {
+ if jm.Error.Code == 401 {
+ return fmt.Errorf("Authentication is required.")
+ }
+ return jm.Error
+ }
+ var endl string
+ if isTerminal && jm.Stream == "" && jm.Progress != nil {
+ // [2K = erase entire current line
+ fmt.Fprintf(out, "%c[2K\r", 27)
+ endl = "\r"
+ } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
+ return nil
+ }
+ if jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed))
+ } else if jm.Time != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed))
+ }
+ if jm.ID != "" {
+ fmt.Fprintf(out, "%s: ", jm.ID)
+ }
+ if jm.From != "" {
+ fmt.Fprintf(out, "(from %s) ", jm.From)
+ }
+ if jm.Progress != nil && isTerminal {
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
+ } else if jm.ProgressMessage != "" { //deprecated
+ fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
+ } else if jm.Stream != "" {
+ fmt.Fprintf(out, "%s%s", jm.Stream, endl)
+ } else {
+ fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
+ }
+ return nil
+}
+
+// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`.
+// `isTerminal` describes whether `out` is a terminal. If this is the case, it will
+// print `\n` at the end of each line and move the cursor while displaying.
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
+ var (
+ dec = json.NewDecoder(in)
+ ids = make(map[string]int)
+ )
+ for {
+ diff := 0
+ var jm JSONMessage
+ if err := dec.Decode(&jm); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if jm.Aux != nil {
+ if auxCallback != nil {
+ auxCallback(jm.Aux)
+ }
+ continue
+ }
+
+ if jm.Progress != nil {
+ jm.Progress.terminalFd = terminalFd
+ }
+ if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+ line, ok := ids[jm.ID]
+ if !ok {
+ // NOTE: This approach of using len(id) to
+ // figure out the number of lines of history
+ // only works as long as we clear the history
+ // when we output something that's not
+ // accounted for in the map, such as a line
+ // with no ID.
+ line = len(ids)
+ ids[jm.ID] = line
+ if isTerminal {
+ fmt.Fprintf(out, "\n")
+ }
+ } else {
+ diff = len(ids) - line
+ }
+ if isTerminal {
+ // NOTE: this appears to be necessary even if
+ // diff == 0.
+ // [{diff}A = move cursor up diff rows
+ fmt.Fprintf(out, "%c[%dA", 27, diff)
+ }
+ } else {
+ // When outputting something that isn't progress
+ // output, clear the history of previous lines. We
+ // don't want progress entries from some previous
+ // operation to be updated (for example, pull -a
+ // with multiple tags).
+ ids = make(map[string]int)
+ }
+ err := jm.Display(out, isTerminal)
+ if jm.ID != "" && isTerminal {
+ // NOTE: this appears to be necessary even if
+ // diff == 0.
+ // [{diff}B = move cursor down diff rows
+ fmt.Fprintf(out, "%c[%dB", 27, diff)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go
new file mode 100644
index 00000000000..479857d904d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage_test.go
@@ -0,0 +1,245 @@
+package jsonmessage
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/jsonlog"
+ "github.com/docker/docker/pkg/term"
+)
+
+func TestError(t *testing.T) {
+ je := JSONError{404, "Not found"}
+ if je.Error() != "Not found" {
+ t.Fatalf("Expected 'Not found' got '%s'", je.Error())
+ }
+}
+
+func TestProgress(t *testing.T) {
+ termsz, err := term.GetWinsize(0)
+ if err != nil {
+ // we can safely ignore the err here
+ termsz = nil
+ }
+ jp := JSONProgress{}
+ if jp.String() != "" {
+ t.Fatalf("Expected empty string, got '%s'", jp.String())
+ }
+
+ expected := " 1 B"
+ jp2 := JSONProgress{Current: 1}
+ if jp2.String() != expected {
+ t.Fatalf("Expected %q, got %q", expected, jp2.String())
+ }
+
+ expectedStart := "[==========> ] 20 B/100 B"
+ if termsz != nil && termsz.Width <= 110 {
+ expectedStart = " 20 B/100 B"
+ }
+ jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()}
+ // Just look at the start of the string
+ // (the remaining time is really hard to test -_-)
+ if jp3.String()[:len(expectedStart)] != expectedStart {
+ t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String())
+ }
+
+ expected = "[=========================> ] 50 B/100 B"
+ if termsz != nil && termsz.Width <= 110 {
+ expected = " 50 B/100 B"
+ }
+ jp4 := JSONProgress{Current: 50, Total: 100}
+ if jp4.String() != expected {
+ t.Fatalf("Expected %q, got %q", expected, jp4.String())
+ }
+
+ // this number can't be negative gh#7136
+ expected = "[==================================================>] 50 B"
+ if termsz != nil && termsz.Width <= 110 {
+ expected = " 50 B"
+ }
+ jp5 := JSONProgress{Current: 50, Total: 40}
+ if jp5.String() != expected {
+ t.Fatalf("Expected %q, got %q", expected, jp5.String())
+ }
+}
+
+func TestJSONMessageDisplay(t *testing.T) {
+ now := time.Now()
+ messages := map[JSONMessage][]string{
+ // Empty
+ JSONMessage{}: {"\n", "\n"},
+ // Status
+ JSONMessage{
+ Status: "status",
+ }: {
+ "status\n",
+ "status\n",
+ },
+ // General
+ JSONMessage{
+ Time: now.Unix(),
+ ID: "ID",
+ From: "From",
+ Status: "status",
+ }: {
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)),
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)),
+ },
+ // General, with nano precision time
+ JSONMessage{
+ TimeNano: now.UnixNano(),
+ ID: "ID",
+ From: "From",
+ Status: "status",
+ }: {
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+ },
+ // General, with both times Nano is preferred
+ JSONMessage{
+ Time: now.Unix(),
+ TimeNano: now.UnixNano(),
+ ID: "ID",
+ From: "From",
+ Status: "status",
+ }: {
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+ fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+ },
+ // Stream over status
+ JSONMessage{
+ Status: "status",
+ Stream: "stream",
+ }: {
+ "stream",
+ "stream",
+ },
+ // With progress message
+ JSONMessage{
+ Status: "status",
+ ProgressMessage: "progressMessage",
+ }: {
+ "status progressMessage",
+ "status progressMessage",
+ },
+ // With progress, stream empty
+ JSONMessage{
+ Status: "status",
+ Stream: "",
+ Progress: &JSONProgress{Current: 1},
+ }: {
+ "",
+ fmt.Sprintf("%c[2K\rstatus 1 B\r", 27),
+ },
+ }
+
+ // The tests :)
+ for jsonMessage, expectedMessages := range messages {
+ // Without terminal
+ data := bytes.NewBuffer([]byte{})
+ if err := jsonMessage.Display(data, false); err != nil {
+ t.Fatal(err)
+ }
+ if data.String() != expectedMessages[0] {
+ t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String())
+ }
+ // With terminal
+ data = bytes.NewBuffer([]byte{})
+ if err := jsonMessage.Display(data, true); err != nil {
+ t.Fatal(err)
+ }
+ if data.String() != expectedMessages[1] {
+ t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String())
+ }
+ }
+}
+
+// Test JSONMessage with an Error. It will return an error with the error text, not the meaning of the HTTP code.
+func TestJSONMessageDisplayWithJSONError(t *testing.T) {
+ data := bytes.NewBuffer([]byte{})
+ jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}}
+
+ err := jsonMessage.Display(data, true)
+ if err == nil || err.Error() != "Can't find it" {
+ t.Fatalf("Expected a JSONError 404, got [%v]", err)
+ }
+
+ jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}}
+ err = jsonMessage.Display(data, true)
+ if err == nil || err.Error() != "Authentication is required." {
+ t.Fatalf("Expected an error [Authentication is required.], got [%v]", err)
+ }
+}
+
+func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) {
+	var inFd uintptr
+ data := bytes.NewBuffer([]byte{})
+ reader := strings.NewReader("This is not a 'valid' JSON []")
+ inFd, _ = term.GetFdInfo(reader)
+
+	if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil || err.Error()[:17] != "invalid character" {
+ t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err)
+ }
+}
+
+func TestDisplayJSONMessagesStream(t *testing.T) {
+	var inFd uintptr
+
+ messages := map[string][]string{
+ // empty string
+ "": {
+ "",
+ ""},
+ // Without progress & ID
+ "{ \"status\": \"status\" }": {
+ "status\n",
+ "status\n",
+ },
+ // Without progress, with ID
+ "{ \"id\": \"ID\",\"status\": \"status\" }": {
+ "ID: status\n",
+ fmt.Sprintf("ID: status\n%c[%dB", 27, 0),
+ },
+ // With progress
+ "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": {
+ "ID: status ProgressMessage",
+ fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0),
+ },
+ // With progressDetail
+ "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": {
+ "", // progressbar is disabled in non-terminal
+ fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 0, 27, 27, 0),
+ },
+ }
+ for jsonMessage, expectedMessages := range messages {
+ data := bytes.NewBuffer([]byte{})
+ reader := strings.NewReader(jsonMessage)
+ inFd, _ = term.GetFdInfo(reader)
+
+ // Without terminal
+ if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil {
+ t.Fatal(err)
+ }
+ if data.String() != expectedMessages[0] {
+ t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String())
+ }
+
+ // With terminal
+ data = bytes.NewBuffer([]byte{})
+ reader = strings.NewReader(jsonMessage)
+ if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil {
+ t.Fatal(err)
+ }
+ if data.String() != expectedMessages[1] {
+ t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String())
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go
new file mode 100644
index 00000000000..ff833e3741a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_solaris.go
@@ -0,0 +1,31 @@
+package listeners
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+
+ "github.com/docker/go-connections/sockets"
+)
+
+// Init creates new listeners for the server.
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) {
+ switch proto {
+ case "tcp":
+ l, err := sockets.NewTCPSocket(addr, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+ case "unix":
+ l, err := sockets.NewUnixSocket(addr, socketGroup)
+ if err != nil {
+ return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err)
+ }
+ ls = append(ls, l)
+ default:
+		return nil, fmt.Errorf("invalid protocol format: %q", proto)
+ }
+
+ return
+}
diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go
new file mode 100644
index 00000000000..1bcae7aa3e5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_unix.go
@@ -0,0 +1,94 @@
+// +build !windows,!solaris
+
+package listeners
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "strconv"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/coreos/go-systemd/activation"
+ "github.com/docker/go-connections/sockets"
+)
+
+// Init creates new listeners for the server.
+// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used.
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) {
+ ls := []net.Listener{}
+
+ switch proto {
+ case "fd":
+ fds, err := listenFD(addr, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, fds...)
+ case "tcp":
+ l, err := sockets.NewTCPSocket(addr, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+ case "unix":
+ l, err := sockets.NewUnixSocket(addr, socketGroup)
+ if err != nil {
+ return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err)
+ }
+ ls = append(ls, l)
+ default:
+ return nil, fmt.Errorf("invalid protocol format: %q", proto)
+ }
+
+ return ls, nil
+}
+
+// listenFD returns the specified socket activated files as a slice of
+// net.Listeners or all of the activated files if "*" is given.
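+// Activated fds are numbered from 3, so addr "3" selects the first activated
+// socket, "4" the second, and so on; an empty addr or "*" returns all of them.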
+func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) {
+ var (
+ err error
+ listeners []net.Listener
+ )
+ // socket activation
+ if tlsConfig != nil {
+ listeners, err = activation.TLSListeners(false, tlsConfig)
+ } else {
+ listeners, err = activation.Listeners(false)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if len(listeners) == 0 {
+ return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd")
+ }
+
+ // default to all fds just like unix:// and tcp://
+ if addr == "" || addr == "*" {
+ return listeners, nil
+ }
+
+ fdNum, err := strconv.Atoi(addr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr)
+ }
+ fdOffset := fdNum - 3
+	if len(listeners) < fdOffset+1 {
+ return nil, fmt.Errorf("too few socket activated files passed in by systemd")
+ }
+ if listeners[fdOffset] == nil {
+ return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3)
+ }
+ for i, ls := range listeners {
+ if i == fdOffset || ls == nil {
+ continue
+ }
+ if err := ls.Close(); err != nil {
+ // TODO: We shouldn't log inside a library. Remove this or error out.
+ logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err)
+ }
+ }
+ return []net.Listener{listeners[fdOffset]}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go
new file mode 100644
index 00000000000..5b5a470fc66
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/listeners/listeners_windows.go
@@ -0,0 +1,54 @@
+package listeners
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/docker/go-connections/sockets"
+)
+
+// Init creates new listeners for the server.
+func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) {
+ ls := []net.Listener{}
+
+ switch proto {
+ case "tcp":
+ l, err := sockets.NewTCPSocket(addr, tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+
+ case "npipe":
+ // allow Administrators and SYSTEM, plus whatever additional users or groups were specified
+ sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)"
+ if socketGroup != "" {
+ for _, g := range strings.Split(socketGroup, ",") {
+ sid, err := winio.LookupSidByName(g)
+ if err != nil {
+ return nil, err
+ }
+ sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid)
+ }
+ }
+ c := winio.PipeConfig{
+ SecurityDescriptor: sddl,
+ MessageMode: true, // Use message mode so that CloseWrite() is supported
+ InputBufferSize: 65536, // Use 64KB buffers to improve performance
+ OutputBufferSize: 65536,
+ }
+ l, err := winio.ListenPipe(addr, &c)
+ if err != nil {
+ return nil, err
+ }
+ ls = append(ls, l)
+
+ default:
+ return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe")
+ }
+
+ return ls, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/locker/README.md b/vendor/github.com/docker/docker/pkg/locker/README.md
new file mode 100644
index 00000000000..e84a815cc51
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+=====
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is close to a sync.Mutex; however, the user must provide a
+reference name to refer to the underlying lock when locking and unlocking,
+and Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+
+
+## Usage
+
+```go
+package important
+
+import (
+ "sync"
+ "time"
+
+ "github.com/docker/docker/pkg/locker"
+)
+
+type important struct {
+ locks *locker.Locker
+ data map[string]interface{}
+ mu sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+ i.locks.Lock(name)
+ defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+ i.locks.Lock(name)
+ defer i.locks.Unlock(name)
+
+ i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+ time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures that
+any other function dealing with the same name will block.
+
+When modifying the underlying data, use the global lock to ensure nothing
+else is modifying it at the same time.
+Since the name lock is already in place, no reads will occur while the
+modification is being performed.
+
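+As an example of the combined pattern, a delete operation would take the name
+lock first and the global lock only around the map mutation (a minimal sketch,
+assuming the `important` type from the usage example above):
+
+```go
+func (i *important) Delete(name string) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.mu.Lock()
+	delete(i.data, name)
+	i.mu.Unlock()
+}
+```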
diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go
new file mode 100644
index 00000000000..0b22ddfab85
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is close to a sync.Mutex; however, the user must provide a
+reference name to refer to the underlying lock when locking and unlocking,
+and Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+ mu sync.Mutex
+ locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+ mu sync.Mutex
+ // waiters is the number of waiters waiting to acquire the lock
+ // this is int32 instead of uint32 so we can add `-1` in `dec()`
+ waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+ atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+ atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+ return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+ l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+ l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+ return &Locker{
+ locks: make(map[string]*lockCtr),
+ }
+}
+
+// Lock locks a mutex with the given name. If it doesn't exist, one is created
+func (l *Locker) Lock(name string) {
+ l.mu.Lock()
+ if l.locks == nil {
+ l.locks = make(map[string]*lockCtr)
+ }
+
+ nameLock, exists := l.locks[name]
+ if !exists {
+ nameLock = &lockCtr{}
+ l.locks[name] = nameLock
+ }
+
+ // increment the nameLock waiters while inside the main mutex
+ // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently
+ nameLock.inc()
+ l.mu.Unlock()
+
+ // Lock the nameLock outside the main mutex so we don't block other operations
+ // once locked then we can decrement the number of waiters for this lock
+ nameLock.Lock()
+ nameLock.dec()
+}
+
+// Unlock unlocks the mutex with the given name
+// If the given lock is not being waited on by any other callers, it is deleted
+func (l *Locker) Unlock(name string) error {
+ l.mu.Lock()
+ nameLock, exists := l.locks[name]
+ if !exists {
+ l.mu.Unlock()
+ return ErrNoSuchLock
+ }
+
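+	// If no other goroutine is waiting to acquire this lock (waiters are
+	// counted under l.mu in Lock), the map entry can be dropped safely.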
+ if nameLock.count() == 0 {
+ delete(l.locks, name)
+ }
+ nameLock.Unlock()
+
+ l.mu.Unlock()
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/locker/locker_test.go b/vendor/github.com/docker/docker/pkg/locker/locker_test.go
new file mode 100644
index 00000000000..5a297dd47b6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/locker/locker_test.go
@@ -0,0 +1,124 @@
+package locker
+
+import (
+ "sync"
+ "testing"
+ "time"
+)
+
+func TestLockCounter(t *testing.T) {
+ l := &lockCtr{}
+ l.inc()
+
+ if l.waiters != 1 {
+ t.Fatal("counter inc failed")
+ }
+
+ l.dec()
+ if l.waiters != 0 {
+ t.Fatal("counter dec failed")
+ }
+}
+
+func TestLockerLock(t *testing.T) {
+ l := New()
+ l.Lock("test")
+ ctr := l.locks["test"]
+
+ if ctr.count() != 0 {
+		t.Fatalf("expected waiters to be 0, got: %d", ctr.waiters)
+ }
+
+ chDone := make(chan struct{})
+ go func() {
+ l.Lock("test")
+ close(chDone)
+ }()
+
+ chWaiting := make(chan struct{})
+ go func() {
+ for range time.Tick(1 * time.Millisecond) {
+ if ctr.count() == 1 {
+ close(chWaiting)
+ break
+ }
+ }
+ }()
+
+ select {
+ case <-chWaiting:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timed out waiting for lock waiters to be incremented")
+ }
+
+ select {
+ case <-chDone:
+ t.Fatal("lock should not have returned while it was still held")
+ default:
+ }
+
+ if err := l.Unlock("test"); err != nil {
+ t.Fatal(err)
+ }
+
+ select {
+ case <-chDone:
+ case <-time.After(3 * time.Second):
+ t.Fatalf("lock should have completed")
+ }
+
+ if ctr.count() != 0 {
+ t.Fatalf("expected waiters to be 0, got: %d", ctr.count())
+ }
+}
+
+func TestLockerUnlock(t *testing.T) {
+ l := New()
+
+ l.Lock("test")
+ l.Unlock("test")
+
+ chDone := make(chan struct{})
+ go func() {
+ l.Lock("test")
+ close(chDone)
+ }()
+
+ select {
+ case <-chDone:
+ case <-time.After(3 * time.Second):
+ t.Fatalf("lock should not be blocked")
+ }
+}
+
+func TestLockerConcurrency(t *testing.T) {
+ l := New()
+
+ var wg sync.WaitGroup
+ for i := 0; i <= 10000; i++ {
+ wg.Add(1)
+ go func() {
+ l.Lock("test")
+			// if there is a concurrency issue, this will very likely panic here
+ l.Unlock("test")
+ wg.Done()
+ }()
+ }
+
+ chDone := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(chDone)
+ }()
+
+ select {
+ case <-chDone:
+ case <-time.After(10 * time.Second):
+ t.Fatal("timeout waiting for locks to complete")
+ }
+
+ // Since everything has unlocked this should not exist anymore
+ if ctr, exists := l.locks["test"]; exists {
+ t.Fatalf("lock should not exist: %v", ctr)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
new file mode 100644
index 00000000000..9b15bfff4c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go
@@ -0,0 +1,26 @@
+// Package longpath introduces some constants and helper functions for handling
+// long paths in Windows, which are expected to be prepended with `\\?\` and
+// followed by either a drive letter, a UNC server\share, or a volume identifier.
+package longpath
+
+import (
+ "strings"
+)
+
+// Prefix is the longpath prefix for Windows file paths.
+const Prefix = `\\?\`
+
+// AddPrefix will add the Windows long path prefix to the path provided if
+// it does not already have it.
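+// For example, `C:\dir` becomes `\\?\C:\dir`, and `\\server\share` becomes
+// `\\?\UNC\server\share`.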
+func AddPrefix(path string) string {
+ if !strings.HasPrefix(path, Prefix) {
+ if strings.HasPrefix(path, `\\`) {
+ // This is a UNC path, so we need to add 'UNC' to the path as well.
+ path = Prefix + `UNC` + path[1:]
+ } else {
+ path = Prefix + path
+ }
+ }
+ return path
+}
diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go
new file mode 100644
index 00000000000..01865eff09a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/longpath/longpath_test.go
@@ -0,0 +1,22 @@
+package longpath
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestStandardLongPath(t *testing.T) {
+ c := `C:\simple\path`
+ longC := AddPrefix(c)
+ if !strings.EqualFold(longC, `\\?\C:\simple\path`) {
+ t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC)
+ }
+}
+
+func TestUNCLongPath(t *testing.T) {
+ c := `\\server\share\path`
+ longC := AddPrefix(c)
+ if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) {
+ t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go
new file mode 100644
index 00000000000..971f45eb489
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go
@@ -0,0 +1,137 @@
+// +build linux
+
+package loopback
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Loopback related errors
+var (
+	ErrAttachLoopbackDevice   = errors.New("loopback attach failed")
+	ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file")
+	ErrSetCapacity            = errors.New("Unable to set loopback capacity")
+)
+
+func stringToLoopName(src string) [LoNameSize]uint8 {
+ var dst [LoNameSize]uint8
+ copy(dst[:], src[:])
+ return dst
+}
+
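+// getNextFreeLoopbackIndex asks the kernel for the index of the first unused
+// loopback device by issuing the LOOP_CTL_GET_FREE ioctl on /dev/loop-control.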
+func getNextFreeLoopbackIndex() (int, error) {
+ f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644)
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ index, err := ioctlLoopCtlGetFree(f.Fd())
+ if index < 0 {
+ index = 0
+ }
+ return index, err
+}
+
+func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) {
+ // Start looking for a free /dev/loop
+ for {
+ target := fmt.Sprintf("/dev/loop%d", index)
+ index++
+
+ fi, err := os.Stat(target)
+ if err != nil {
+ if os.IsNotExist(err) {
+ logrus.Error("There are no more loopback devices available.")
+ }
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ if fi.Mode()&os.ModeDevice != os.ModeDevice {
+ logrus.Errorf("Loopback device %s is not a block device.", target)
+ continue
+ }
+
+ // OpenFile adds O_CLOEXEC
+ loopFile, err = os.OpenFile(target, os.O_RDWR, 0644)
+ if err != nil {
+ logrus.Errorf("Error opening loopback device: %s", err)
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ // Try to attach to the loop file
+ if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil {
+ loopFile.Close()
+
+ // If the error is EBUSY, then try the next loopback
+ if err != syscall.EBUSY {
+ logrus.Errorf("Cannot set up loopback device %s: %s", target, err)
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ // Otherwise, we keep going with the loop
+ continue
+ }
+ // In case of success, we finished. Break the loop.
+ break
+ }
+
+ // This can't happen, but let's be sure
+ if loopFile == nil {
+ logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name())
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ return loopFile, nil
+}
+
+// AttachLoopDevice attaches the given sparse file to the next
+// available loopback device. It returns an opened *os.File.
+func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
+
+ // Try to retrieve the next available loopback device via syscall.
+	// If it fails, we discard the error and start scanning for a
+	// loopback from index 0.
+ startIndex, err := getNextFreeLoopbackIndex()
+ if err != nil {
+ logrus.Debugf("Error retrieving the next available loopback: %s", err)
+ }
+
+ // OpenFile adds O_CLOEXEC
+ sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644)
+ if err != nil {
+ logrus.Errorf("Error opening sparse file %s: %s", sparseName, err)
+ return nil, ErrAttachLoopbackDevice
+ }
+ defer sparseFile.Close()
+
+ loopFile, err := openNextAvailableLoopback(startIndex, sparseFile)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set the status of the loopback device
+ loopInfo := &loopInfo64{
+ loFileName: stringToLoopName(loopFile.Name()),
+ loOffset: 0,
+ loFlags: LoFlagsAutoClear,
+ }
+
+ if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil {
+ logrus.Errorf("Cannot set up loopback device info: %s", err)
+
+ // If the call failed, then free the loopback device
+ if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
+ logrus.Error("Error while cleaning up the loopback device")
+ }
+ loopFile.Close()
+ return nil, ErrAttachLoopbackDevice
+ }
+
+ return loopFile, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go
new file mode 100644
index 00000000000..0714eb5f875
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go
@@ -0,0 +1,53 @@
+// +build linux
+
+package loopback
+
+import (
+ "syscall"
+ "unsafe"
+)
+
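+// Each helper below wraps a loop-device ioctl(2); syscall.Syscall returns an
+// Errno, which indicates an error only when non-zero.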
+func ioctlLoopCtlGetFree(fd uintptr) (int, error) {
+ index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(index), nil
+}
+
+func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopClrFd(loopFd uintptr) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 {
+ return err
+ }
+ return nil
+}
+
+func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) {
+ loopInfo := &loopInfo64{}
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 {
+ return nil, err
+ }
+ return loopInfo, nil
+}
+
+func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go
new file mode 100644
index 00000000000..e1100ce156f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go
@@ -0,0 +1,52 @@
+// +build linux
+
+package loopback
+
+/*
+#include <linux/loop.h> // FIXME: present only for defines, maybe we can remove it?
+
+#ifndef LOOP_CTL_GET_FREE
+ #define LOOP_CTL_GET_FREE 0x4C82
+#endif
+
+#ifndef LO_FLAGS_PARTSCAN
+ #define LO_FLAGS_PARTSCAN 8
+#endif
+
+*/
+import "C"
+
+type loopInfo64 struct {
+ loDevice uint64 /* ioctl r/o */
+ loInode uint64 /* ioctl r/o */
+ loRdevice uint64 /* ioctl r/o */
+ loOffset uint64
+ loSizelimit uint64 /* bytes, 0 == max available */
+ loNumber uint32 /* ioctl r/o */
+ loEncryptType uint32
+ loEncryptKeySize uint32 /* ioctl w/o */
+ loFlags uint32 /* ioctl r/o */
+ loFileName [LoNameSize]uint8
+ loCryptName [LoNameSize]uint8
+ loEncryptKey [LoKeySize]uint8 /* ioctl w/o */
+ loInit [2]uint64
+}
+
+// IOCTL consts
+const (
+ LoopSetFd = C.LOOP_SET_FD
+ LoopCtlGetFree = C.LOOP_CTL_GET_FREE
+ LoopGetStatus64 = C.LOOP_GET_STATUS64
+ LoopSetStatus64 = C.LOOP_SET_STATUS64
+ LoopClrFd = C.LOOP_CLR_FD
+ LoopSetCapacity = C.LOOP_SET_CAPACITY
+)
+
+// LOOP consts.
+const (
+ LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR
+ LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY
+ LoFlagsPartScan = C.LO_FLAGS_PARTSCAN
+ LoKeySize = C.LO_KEY_SIZE
+ LoNameSize = C.LO_NAME_SIZE
+)
diff --git a/vendor/github.com/docker/docker/pkg/loopback/loopback.go b/vendor/github.com/docker/docker/pkg/loopback/loopback.go
new file mode 100644
index 00000000000..bc0479284c2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/loopback/loopback.go
@@ -0,0 +1,63 @@
+// +build linux
+
+package loopback
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+)
+
+func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) {
+ loopInfo, err := ioctlLoopGetStatus64(file.Fd())
+ if err != nil {
+		logrus.Errorf("Error getting loopback backing file: %s", err)
+ return 0, 0, ErrGetLoopbackBackingFile
+ }
+ return loopInfo.loDevice, loopInfo.loInode, nil
+}
+
+// SetCapacity reloads the size for the loopback device.
+func SetCapacity(file *os.File) error {
+ if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil {
+ logrus.Errorf("Error loopbackSetCapacity: %s", err)
+ return ErrSetCapacity
+ }
+ return nil
+}
+
+// FindLoopDeviceFor returns a loopback device file for the specified file, which
+// is the backing file of a loopback device.
+func FindLoopDeviceFor(file *os.File) *os.File {
+ stat, err := file.Stat()
+ if err != nil {
+ return nil
+ }
+ targetInode := stat.Sys().(*syscall.Stat_t).Ino
+ targetDevice := stat.Sys().(*syscall.Stat_t).Dev
+
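+	// Scan /dev/loop0, /dev/loop1, ... until the first missing device node,
+	// returning the device whose backing file matches the target dev/inode.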
+ for i := 0; true; i++ {
+ path := fmt.Sprintf("/dev/loop%d", i)
+
+ file, err := os.OpenFile(path, os.O_RDWR, 0)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+
+			// Ignore all errors until the first not-exist;
+			// we want to continue looking for the file.
+ continue
+ }
+
+ dev, inode, err := getLoopbackBackingFile(file)
+ if err == nil && dev == targetDevice && inode == targetInode {
+ return file
+ }
+ file.Close()
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mflag/LICENSE b/vendor/github.com/docker/docker/pkg/mflag/LICENSE
new file mode 100644
index 00000000000..9b4f4a294ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/docker/pkg/mflag/README.md b/vendor/github.com/docker/docker/pkg/mflag/README.md
new file mode 100644
index 00000000000..5e81bb2a363
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mflag/README.md
@@ -0,0 +1,40 @@
+Package mflag (aka multiple-flag) implements command-line flag parsing.
+It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/).
+
+It adds:
+
+* both short and long flag versions
+`./example -s red` `./example --string blue`
+
+* multiple names for the same option
+```
+$>./example -h
+Usage of example:
+ -s, --string="": a simple string
+```
+
+___
+It is very flexible on purpose, so you can do things like:
+```
+$>./example -h
+Usage of example:
+ -s, -string, --string="": a simple string
+```
+
+Or:
+```
+$>./example -h
+Usage of example:
+ -oldflag, --newflag="": a simple string
+```
+
+You can also hide some flags from the usage, so if we want only `--newflag`:
+```
+$>./example -h
+Usage of example:
+ --newflag="": a simple string
+$>./example -oldflag str
+str
+```
+
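+In code, the multiple names are passed as a slice: a leading `-` marks a long
+form (used as `--name`) and a leading `#` hides or deprecates the name. A
+minimal sketch:
+
+```go
+var str = flag.String([]string{"s", "-string"}, "", "a simple string")
+```
+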
+See [example.go](example/example.go) for more details.
diff --git a/vendor/github.com/docker/docker/pkg/mflag/example/example.go b/vendor/github.com/docker/docker/pkg/mflag/example/example.go
new file mode 100644
index 00000000000..2e766dd1e56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mflag/example/example.go
@@ -0,0 +1,36 @@
+package main
+
+import (
+ "fmt"
+
+ flag "github.com/docker/docker/pkg/mflag"
+)
+
+var (
+ i int
+ str string
+ b, b2, h bool
+)
+
+func init() {
+ flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp")
+ flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool")
+ flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool")
+ flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool")
+ flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer")
+	flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") // -s, -hidden and --string will work, but -hidden won't be in the usage
+ flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help")
+ flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3")
+ flag.Parse()
+}
+func main() {
+ if h {
+ flag.PrintDefaults()
+ } else {
+ fmt.Printf("s/#hidden/-string: %s\n", str)
+ fmt.Printf("b: %t\n", b)
+ fmt.Printf("-bool: %t\n", b2)
+ fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String())
+ fmt.Printf("ARGS: %v\n", flag.Args())
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag.go b/vendor/github.com/docker/docker/pkg/mflag/flag.go
new file mode 100644
index 00000000000..f13a8cf06c8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mflag/flag.go
@@ -0,0 +1,1280 @@
+// Copyright 2014-2016 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package mflag implements command-line flag parsing.
+//
+// Usage:
+//
+// Define flags using flag.String(), Bool(), Int(), etc.
+//
+// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int.
+// import flag "github.com/docker/docker/pkg/mflag"
+// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname")
+// If you like, you can bind the flag to a variable using the Var() functions.
+// var flagvar int
+// func init() {
+// // -flaghidden will work, but will be hidden from the usage
+// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname")
+// }
+// Or you can create custom flags that satisfy the Value interface (with
+// pointer receivers) and couple them to flag parsing by
+// flag.Var(&flagVal, []string{"name"}, "help message for flagname")
+// For such flags, the default value is just the initial value of the variable.
+//
+// You can also add "deprecated" flags; they are still usable, but are not shown
+// in the usage and will display a warning when you try to use them. A `#` before
+// an option means the option is deprecated; if a later option in the list has no
+// `#`, that option is the replacement, otherwise the flag will just be removed:
+// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname")
+// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or
+// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.`
+// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname")
+// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.`
+// so you can only use `-f`.
+//
+// You can also group one-letter flags; if you declare
+// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose")
+// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow")
+// you will be able to use -vs or -sv.
+//
+// After all flags are defined, call
+// flag.Parse()
+// to parse the command line into the defined flags.
+//
+// Flags may then be used directly. If you're using the flags themselves,
+// they are all pointers; if you bind to variables, they're values.
+// fmt.Println("ip has value ", *ip)
+// fmt.Println("flagvar has value ", flagvar)
+//
+// After parsing, the arguments after the flag are available as the
+// slice flag.Args() or individually as flag.Arg(i).
+// The arguments are indexed from 0 through flag.NArg()-1.
+//
+// Command line flag syntax:
+// -flag
+// -flag=x
+// -flag="x"
+// -flag='x'
+// -flag x // non-boolean flags only
+// One or two minus signs may be used; they are equivalent.
+// The last form is not permitted for boolean flags because the
+// meaning of the command
+// cmd -x *
+// will change if there is a file called 0, false, etc. You must
+// use the -flag=false form to turn off a boolean flag.
+//
+// Flag parsing stops just before the first non-flag argument
+// ("-" is a non-flag argument) or after the terminator "--".
+//
+// Integer flags accept 1234, 0664, 0x1234 and may be negative.
+// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False.
+// Duration flags accept any input valid for time.ParseDuration.
+//
+// The default set of command-line flags is controlled by
+// top-level functions. The FlagSet type allows one to define
+// independent sets of flags, such as to implement subcommands
+// in a command-line interface. The methods of FlagSet are
+// analogous to the top-level functions for the command-line
+// flag set.
+
+package mflag
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ "github.com/docker/docker/pkg/homedir"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("flag: help requested")
+
+// ErrRetry is the error returned when an unknown flag should be retried as a group of one-letter flags
+var ErrRetry = errors.New("flag: retry")
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Get() interface{} { return bool(*b) }
+
+func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Get() interface{} { return int(*i) }
+
+func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Get() interface{} { return int64(*i) }
+
+func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Get() interface{} { return uint(*i) }
+
+func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Get() interface{} { return uint64(*i) }
+
+func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- uint16 Value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Get() interface{} { return uint16(*i) }
+
+func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) }
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+
+func (s *stringValue) Get() interface{} { return string(*s) }
+
+func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Get() interface{} { return float64(*f) }
+
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Get() interface{} { return time.Duration(*d) }
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+//
+// If a Value has an IsBoolFlag() bool method returning true,
+// the command-line parser makes -name equivalent to -name=true
+// rather than using the next command-line argument.
+type Value interface {
+ String() string
+ Set(string) error
+}
+
+// Getter is an interface that allows the contents of a Value to be retrieved.
+// It wraps the Value interface, rather than being part of it, because it
+// appeared after Go 1 and its compatibility rules. All Value types provided
+// by this package satisfy the Getter interface.
+type Getter interface {
+ Value
+ Get() interface{}
+}
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+// ErrorHandling strategies available when a flag parsing error occurs
+const (
+ ContinueOnError ErrorHandling = iota
+ ExitOnError
+ PanicOnError
+)
+
+// A FlagSet represents a set of defined flags. The zero value of a FlagSet
+// has no name and has ContinueOnError error handling.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+ ShortUsage func()
+
+ name string
+ parsed bool
+ actual map[string]*Flag
+ formal map[string]*Flag
+ args []string // arguments after flags
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use Out() accessor
+ nArgRequirements []nArgRequirement
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Names []string // name as it appears on command line
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+}
+
+type flagSlice []string
+
+func (p flagSlice) Len() int { return len(p) }
+func (p flagSlice) Less(i, j int) bool {
+ pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-")
+ lpi, lpj := strings.ToLower(pi), strings.ToLower(pj)
+ if lpi != lpj {
+ return lpi < lpj
+ }
+ return pi < pj
+}
+func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[string]*Flag) []*Flag {
+ var list flagSlice
+
+	// The sorted list is based on the first name, though the flag map might use the other names.
+ nameMap := make(map[string]string)
+
+ for n, f := range flags {
+ fName := strings.TrimPrefix(f.Names[0], "#")
+ nameMap[fName] = n
+ if len(f.Names) == 1 {
+ list = append(list, fName)
+ continue
+ }
+
+ found := false
+ for _, name := range list {
+ if name == fName {
+ found = true
+ break
+ }
+ }
+ if !found {
+ list = append(list, fName)
+ }
+ }
+ sort.Sort(list)
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[nameMap[name]]
+ }
+ return result
+}
+
+// Name returns the name of the FlagSet.
+func (fs *FlagSet) Name() string {
+ return fs.name
+}
+
+// Out returns the destination for usage and error messages.
+func (fs *FlagSet) Out() io.Writer {
+ if fs.output == nil {
+ return os.Stderr
+ }
+ return fs.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (fs *FlagSet) SetOutput(output io.Writer) {
+ fs.output = output
+}
+
+// VisitAll visits the flags in lexicographical order, calling fn for each.
+// It visits all flags, even those not set.
+func (fs *FlagSet) VisitAll(fn func(*Flag)) {
+ for _, flag := range sortFlags(fs.formal) {
+ fn(flag)
+ }
+}
+
+// VisitAll visits the command-line flags in lexicographical order, calling
+// fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order, calling fn for each.
+// It visits only those flags that have been set.
+func (fs *FlagSet) Visit(fn func(*Flag)) {
+ for _, flag := range sortFlags(fs.actual) {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order, calling fn
+// for each. It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (fs *FlagSet) Lookup(name string) *Flag {
+ return fs.formal[name]
+}
+
+// IsSet indicates whether the specified flag is set in the given FlagSet
+func (fs *FlagSet) IsSet(name string) bool {
+ return fs.actual[name] != nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.formal[name]
+}
+
+// IsSet indicates whether the specified flag was specified at all on the cmd line.
+func IsSet(name string) bool {
+ return CommandLine.IsSet(name)
+}
+
+type nArgRequirementType int
+
+// Requirement types that can be passed to FlagSet.Require
+const (
+ Exact nArgRequirementType = iota
+ Max
+ Min
+)
+
+type nArgRequirement struct {
+ Type nArgRequirementType
+ N int
+}
+
+// Require adds a requirement about the number of arguments for the FlagSet.
+// The first parameter can be Exact, Max, or Min to respectively specify the exact,
+// the maximum, or the minimal number of arguments required.
+// The actual check is done in FlagSet.CheckArgs().
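+// For example, fs.Require(Exact, 1) makes CheckArgs report an error message
+// unless exactly one positional argument was passed.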
+func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) {
+ fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg})
+}
+
+// CheckArgs uses the requirements set by FlagSet.Require() to validate
+// the number of arguments. If the requirements are not met,
+// an error message string is returned.
+func (fs *FlagSet) CheckArgs() (message string) {
+ for _, req := range fs.nArgRequirements {
+ var arguments string
+ if req.N == 1 {
+ arguments = "1 argument"
+ } else {
+ arguments = fmt.Sprintf("%d arguments", req.N)
+ }
+
+ str := func(kind string) string {
+ return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments)
+ }
+
+ switch req.Type {
+ case Exact:
+ if fs.NArg() != req.N {
+ return str("")
+ }
+ case Max:
+ if fs.NArg() > req.N {
+ return str("a maximum of ")
+ }
+ case Min:
+ if fs.NArg() < req.N {
+ return str("a minimum of ")
+ }
+ }
+ }
+ return ""
+}
+
+// Set sets the value of the named flag.
+func (fs *FlagSet) Set(name, value string) error {
+ flag, ok := fs.formal[name]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return err
+ }
+ if fs.actual == nil {
+ fs.actual = make(map[string]*Flag)
+ }
+ fs.actual[name] = flag
+ return nil
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// isZeroValue guesses whether the string represents the zero
+// value for a flag. It is not accurate but in practice works OK.
+func isZeroValue(value string) bool {
+ switch value {
+ case "false":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (fs *FlagSet) PrintDefaults() {
+ writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0)
+ home := homedir.Get()
+
+ // Don't substitute when HOME is /
+ if runtime.GOOS != "windows" && home == "/" {
+ home = ""
+ }
+
+ // Add a blank line between cmd description and list of options
+ if fs.FlagCount() > 0 {
+ fmt.Fprintln(writer, "")
+ }
+
+ fs.VisitAll(func(flag *Flag) {
+ names := []string{}
+ for _, name := range flag.Names {
+ if name[0] != '#' {
+ names = append(names, name)
+ }
+ }
+ if len(names) > 0 && len(flag.Usage) > 0 {
+ val := flag.DefValue
+
+ if home != "" && strings.HasPrefix(val, home) {
+ val = homedir.GetShortcutString() + val[len(home):]
+ }
+
+ if isZeroValue(val) {
+ format := " -%s"
+ fmt.Fprintf(writer, format, strings.Join(names, ", -"))
+ } else {
+ format := " -%s=%s"
+ fmt.Fprintf(writer, format, strings.Join(names, ", -"), val)
+ }
+ for _, line := range strings.Split(flag.Usage, "\n") {
+ fmt.Fprintln(writer, "\t", line)
+ }
+ }
+ })
+ writer.Flush()
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(fs *FlagSet) {
+ if fs.name == "" {
+ fmt.Fprintf(fs.Out(), "Usage:\n")
+ } else {
+ fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name)
+ }
+ fs.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+var Usage = func() {
+ fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// ShortUsage prints to standard error a usage message documenting the standard command layout.
+// The function is a variable that may be changed to point to a custom function.
+var ShortUsage = func() {
+	fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0])
+}
+
+// FlagCount returns the number of flags that have been defined.
+func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) }
+
+// FlagCountUndeprecated returns the number of undeprecated flags that have been defined.
+func (fs *FlagSet) FlagCountUndeprecated() int {
+ count := 0
+ for _, flag := range sortFlags(fs.formal) {
+ for _, name := range flag.Names {
+ if name[0] != '#' {
+ count++
+ break
+ }
+ }
+ }
+ return count
+}
+
+// NFlag returns the number of flags that have been set.
+func (fs *FlagSet) NFlag() int { return len(fs.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (fs *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(fs.args) {
+ return ""
+ }
+ return fs.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (fs *FlagSet) NArg() int { return len(fs.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (fs *FlagSet) Args() []string { return fs.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) {
+ fs.Var(newBoolValue(value, p), names, usage)
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, names []string, value bool, usage string) {
+ CommandLine.Var(newBoolValue(value, p), names, usage)
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool {
+ p := new(bool)
+ fs.BoolVar(p, names, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(names []string, value bool, usage string) *bool {
+ return CommandLine.Bool(names, value, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) {
+ fs.Var(newIntValue(value, p), names, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, names []string, value int, usage string) {
+ CommandLine.Var(newIntValue(value, p), names, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (fs *FlagSet) Int(names []string, value int, usage string) *int {
+ p := new(int)
+ fs.IntVar(p, names, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(names []string, value int, usage string) *int {
+ return CommandLine.Int(names, value, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) {
+ fs.Var(newInt64Value(value, p), names, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, names []string, value int64, usage string) {
+ CommandLine.Var(newInt64Value(value, p), names, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 {
+ p := new(int64)
+ fs.Int64Var(p, names, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(names []string, value int64, usage string) *int64 {
+ return CommandLine.Int64(names, value, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) {
+ fs.Var(newUintValue(value, p), names, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, names []string, value uint, usage string) {
+ CommandLine.Var(newUintValue(value, p), names, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint {
+ p := new(uint)
+ fs.UintVar(p, names, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(names []string, value uint, usage string) *uint {
+ return CommandLine.Uint(names, value, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) {
+ fs.Var(newUint64Value(value, p), names, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, names []string, value uint64, usage string) {
+ CommandLine.Var(newUint64Value(value, p), names, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ fs.Uint64Var(p, names, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(names []string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64(names, value, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) {
+ fs.Var(newUint16Value(value, p), names, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, names []string, value uint16, usage string) {
+ CommandLine.Var(newUint16Value(value, p), names, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ fs.Uint16Var(p, names, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(names []string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16(names, value, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) {
+ fs.Var(newStringValue(value, p), names, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, names []string, value string, usage string) {
+ CommandLine.Var(newStringValue(value, p), names, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (fs *FlagSet) String(names []string, value string, usage string) *string {
+ p := new(string)
+ fs.StringVar(p, names, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(names []string, value string, usage string) *string {
+ return CommandLine.String(names, value, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) {
+ fs.Var(newFloat64Value(value, p), names, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, names []string, value float64, usage string) {
+ CommandLine.Var(newFloat64Value(value, p), names, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 {
+ p := new(float64)
+ fs.Float64Var(p, names, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(names []string, value float64, usage string) *float64 {
+ return CommandLine.Float64(names, value, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
+ fs.Var(newDurationValue(value, p), names, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) {
+ CommandLine.Var(newDurationValue(value, p), names, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ fs.DurationVar(p, names, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(names []string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.Duration(names, value, usage)
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (fs *FlagSet) Var(value Value, names []string, usage string) {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{names, usage, value, value.String()}
+ for _, name := range names {
+ name = strings.TrimPrefix(name, "#")
+ _, alreadythere := fs.formal[name]
+ if alreadythere {
+ var msg string
+ if fs.name == "" {
+ msg = fmt.Sprintf("flag redefined: %s", name)
+ } else {
+ msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name)
+ }
+ fmt.Fprintln(fs.Out(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if fs.formal == nil {
+ fs.formal = make(map[string]*Flag)
+ }
+ fs.formal[name] = flag
+ }
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, names []string, usage string) {
+ CommandLine.Var(value, names, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (fs *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ fmt.Fprintln(fs.Out(), err)
+ if os.Args[0] == fs.name {
+ fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0])
+ } else {
+ fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name)
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (fs *FlagSet) usage() {
+ if fs == CommandLine {
+ Usage()
+ } else if fs.Usage == nil {
+ defaultUsage(fs)
+ } else {
+ fs.Usage()
+ }
+}
+
+func trimQuotes(str string) string {
+ if len(str) == 0 {
+ return str
+ }
+ type quote struct {
+ start, end byte
+ }
+
+ // All valid quote types.
+ quotes := []quote{
+ // Double quotes
+ {
+ start: '"',
+ end: '"',
+ },
+
+ // Single quotes
+ {
+ start: '\'',
+ end: '\'',
+ },
+ }
+
+ for _, quote := range quotes {
+ // Only strip if outermost match.
+ if str[0] == quote.start && str[len(str)-1] == quote.end {
+ str = str[1 : len(str)-1]
+ break
+ }
+ }
+
+ return str
+}
+
+// parseOne parses one flag. It reports whether a flag was seen.
+func (fs *FlagSet) parseOne() (bool, string, error) {
+ if len(fs.args) == 0 {
+ return false, "", nil
+ }
+ s := fs.args[0]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ return false, "", nil
+ }
+ if s[1] == '-' && len(s) == 2 { // "--" terminates the flags
+ fs.args = fs.args[1:]
+ return false, "", nil
+ }
+ name := s[1:]
+ if len(name) == 0 || name[0] == '=' {
+ return false, "", fs.failf("bad flag syntax: %s", s)
+ }
+
+ // it's a flag. does it have an argument?
+ fs.args = fs.args[1:]
+ hasValue := false
+ value := ""
+ if i := strings.Index(name, "="); i != -1 {
+ value = trimQuotes(name[i+1:])
+ hasValue = true
+ name = name[:i]
+ }
+
+ m := fs.formal
+ flag, alreadythere := m[name] // BUG
+ if !alreadythere {
+ if name == "-help" || name == "help" || name == "h" { // special case for nice help message.
+ fs.usage()
+ return false, "", ErrHelp
+ }
+ if len(name) > 0 && name[0] == '-' {
+ return false, "", fs.failf("flag provided but not defined: -%s", name)
+ }
+ return false, name, ErrRetry
+ }
+ if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+ if hasValue {
+ if err := fv.Set(value); err != nil {
+ return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err)
+ }
+ } else {
+ fv.Set("true")
+ }
+ } else {
+ // It must have a value, which might be the next argument.
+ if !hasValue && len(fs.args) > 0 {
+ // value is the next arg
+ hasValue = true
+ value, fs.args = fs.args[0], fs.args[1:]
+ }
+ if !hasValue {
+ return false, "", fs.failf("flag needs an argument: -%s", name)
+ }
+ if err := flag.Value.Set(value); err != nil {
+ return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err)
+ }
+ }
+ if fs.actual == nil {
+ fs.actual = make(map[string]*Flag)
+ }
+ fs.actual[name] = flag
+ for i, n := range flag.Names {
+ if n == fmt.Sprintf("#%s", name) {
+ replacement := ""
+ for j := i; j < len(flag.Names); j++ {
+ if flag.Names[j][0] != '#' {
+ replacement = flag.Names[j]
+ break
+ }
+ }
+ if replacement != "" {
+ fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement)
+ } else {
+ fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name)
+ }
+ }
+ }
+ return true, "", nil
+}
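+
+// Illustrative note, not part of the upstream file: the boolFlag special case
+// above means a boolean flag never consumes the next argument as its value.
+// "-b" and "-b=false" both parse, but in "-b false" the "false" is left in
+// fs.Args(), matching the standard library's flag package.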
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (fs *FlagSet) Parse(arguments []string) error {
+ fs.parsed = true
+ fs.args = arguments
+ for {
+ seen, name, err := fs.parseOne()
+ if seen {
+ continue
+ }
+ if err == nil {
+ break
+ }
+ if err == ErrRetry {
+ if len(name) > 1 {
+ err = nil
+ for _, letter := range strings.Split(name, "") {
+ fs.args = append([]string{"-" + letter}, fs.args...)
+ seen2, _, err2 := fs.parseOne()
+ if seen2 {
+ continue
+ }
+ if err2 != nil {
+ err = fs.failf("flag provided but not defined: -%s", name)
+ break
+ }
+ }
+ if err == nil {
+ continue
+ }
+ } else {
+ err = fs.failf("flag provided but not defined: -%s", name)
+ }
+ }
+ switch fs.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(125)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
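+
+// Illustrative sketch, not part of the upstream file: the ErrRetry branch
+// above is what makes grouped single-letter flags work. With boolean flags
+// "a" and "b" defined,
+//
+//	fs.Parse([]string{"-ab"})
+//
+// first fails to find a flag named "ab", then re-queues "-a" and "-b" and
+// parses each in turn, equivalent to fs.Parse([]string{"-a", "-b"}).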
+
+// ParseFlags is a utility function that adds a help flag if withHelp is true,
+// calls fs.Parse(args), and prints a relevant error message if there is an
+// incorrect number of arguments. It returns an error only if error handling is
+// set to ContinueOnError and parsing fails. If error handling is set to
+// ExitOnError, it's safe to ignore the return value.
+func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error {
+ var help *bool
+ if withHelp {
+ help = fs.Bool([]string{"#help", "-help"}, false, "Print usage")
+ }
+ if err := fs.Parse(args); err != nil {
+ return err
+ }
+ if help != nil && *help {
+ fs.SetOutput(os.Stdout)
+ fs.Usage()
+ os.Exit(0)
+ }
+ if str := fs.CheckArgs(); str != "" {
+ fs.SetOutput(os.Stderr)
+ fs.ReportError(str, withHelp)
+ fs.ShortUsage()
+ os.Exit(1)
+ }
+ return nil
+}
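+
+// A typical call site, for illustration only (the subcommand name "inspect"
+// and its argument layout are hypothetical):
+//
+//	fs := NewFlagSet("inspect", ExitOnError)
+//	format := fs.String([]string{"f", "-format"}, "", "Format the output")
+//	fs.ParseFlags(os.Args[2:], true) // adds -help/--help, exits on bad usage
+//	fmt.Println(*format)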
+
+// ReportError is a utility method that prints a user-friendly message
+// containing the error that occurred during parsing, plus a suggestion to get
+// help.
+func (fs *FlagSet) ReportError(str string, withHelp bool) {
+ if withHelp {
+ if os.Args[0] == fs.Name() {
+ str += ".\nSee '" + os.Args[0] + " --help'"
+ } else {
+ str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'"
+ }
+ }
+ fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str)
+}
+
+// Parsed reports whether fs.Parse has been called.
+func (fs *FlagSet) Parsed() bool {
+ return fs.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+// The top-level functions such as BoolVar, Arg, and so on are wrappers for the
+// methods of CommandLine.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name and
+// error handling property.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ }
+ return f
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ fs.name = name
+ fs.errorHandling = errorHandling
+}
+
+type mergeVal struct {
+ Value
+ key string
+ fset *FlagSet
+}
+
+func (v mergeVal) Set(s string) error {
+ return v.fset.Set(v.key, s)
+}
+
+func (v mergeVal) IsBoolFlag() bool {
+ if b, ok := v.Value.(boolFlag); ok {
+ return b.IsBoolFlag()
+ }
+ return false
+}
+
+// Name returns the name of a mergeVal.
+// If the original value had a name, return the original name;
+// otherwise, return the key assigned to this mergeVal.
+func (v mergeVal) Name() string {
+ type namedValue interface {
+ Name() string
+ }
+ if nVal, ok := v.Value.(namedValue); ok {
+ return nVal.Name()
+ }
+ return v.key
+}
+
+// Merge is a helper function that merges n FlagSets into a single dest
+// FlagSet. In case of name collisions between the flag sets, it applies the
+// destination FlagSet's errorHandling behavior.
+func Merge(dest *FlagSet, flagsets ...*FlagSet) error {
+ for _, fset := range flagsets {
+ if fset.formal == nil {
+ continue
+ }
+ for k, f := range fset.formal {
+ if _, ok := dest.formal[k]; ok {
+ var err error
+ if fset.name == "" {
+ err = fmt.Errorf("flag redefined: %s", k)
+ } else {
+ err = fmt.Errorf("%s flag redefined: %s", fset.name, k)
+ }
+ fmt.Fprintln(fset.Out(), err.Error())
+ // Happens only if flags are declared with identical names
+ switch dest.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ newF := *f
+ newF.Value = mergeVal{f.Value, k, fset}
+ if dest.formal == nil {
+ dest.formal = make(map[string]*Flag)
+ }
+ dest.formal[k] = &newF
+ }
+ }
+ return nil
+}
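+
+// A minimal sketch of composing flag sets, for illustration only:
+//
+//	common := NewFlagSet("common", ContinueOnError)
+//	verbose := common.Bool([]string{"v", "-verbose"}, false, "verbose output")
+//	cmd := NewFlagSet("cmd", ContinueOnError)
+//	_ = Merge(cmd, common)
+//	_ = cmd.Parse([]string{"-v"})
+//	// mergeVal.Set forwards to the source set, so *verbose is now true
+//	// and common.IsSet("v") reports true as well.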
+
+// IsEmpty reports whether no flags have been set on the FlagSet.
+func (fs *FlagSet) IsEmpty() bool {
+ return len(fs.actual) == 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/mflag/flag_test.go b/vendor/github.com/docker/docker/pkg/mflag/flag_test.go
new file mode 100644
index 00000000000..138355546ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mflag/flag_test.go
@@ -0,0 +1,527 @@
+// Copyright 2014-2016 The Docker & Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mflag
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+)
+
+// ResetForTesting clears all flag state and sets the usage function as directed.
+// After calling ResetForTesting, parse errors in flag handling will not
+// exit the program.
+func ResetForTesting(usage func()) {
+ CommandLine = NewFlagSet(os.Args[0], ContinueOnError)
+ Usage = usage
+}
+func boolString(s string) string {
+ if s == "0" {
+ return "false"
+ }
+ return "true"
+}
+
+func TestEverything(t *testing.T) {
+ ResetForTesting(nil)
+ Bool([]string{"test_bool"}, false, "bool value")
+ Int([]string{"test_int"}, 0, "int value")
+ Int64([]string{"test_int64"}, 0, "int64 value")
+ Uint([]string{"test_uint"}, 0, "uint value")
+ Uint64([]string{"test_uint64"}, 0, "uint64 value")
+ String([]string{"test_string"}, "0", "string value")
+ Float64([]string{"test_float64"}, 0, "float64 value")
+ Duration([]string{"test_duration"}, 0, "time.Duration value")
+
+ m := make(map[string]*Flag)
+ desired := "0"
+ visitor := func(f *Flag) {
+ for _, name := range f.Names {
+ if len(name) > 5 && name[0:5] == "test_" {
+ m[name] = f
+ ok := false
+ switch {
+ case f.Value.String() == desired:
+ ok = true
+ case name == "test_bool" && f.Value.String() == boolString(desired):
+ ok = true
+ case name == "test_duration" && f.Value.String() == desired+"s":
+ ok = true
+ }
+ if !ok {
+ t.Error("Visit: bad value", f.Value.String(), "for", name)
+ }
+ }
+ }
+ }
+ VisitAll(visitor)
+ if len(m) != 8 {
+ t.Error("VisitAll misses some flags")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ m = make(map[string]*Flag)
+ Visit(visitor)
+ if len(m) != 0 {
+ t.Errorf("Visit sees unset flags")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ // Now set all flags
+ Set("test_bool", "true")
+ Set("test_int", "1")
+ Set("test_int64", "1")
+ Set("test_uint", "1")
+ Set("test_uint64", "1")
+ Set("test_string", "1")
+ Set("test_float64", "1")
+ Set("test_duration", "1s")
+ desired = "1"
+ Visit(visitor)
+ if len(m) != 8 {
+ t.Error("Visit fails after set")
+ for k, v := range m {
+ t.Log(k, *v)
+ }
+ }
+ // Now test they're visited in sort order.
+ var flagNames []string
+ Visit(func(f *Flag) {
+ for _, name := range f.Names {
+ flagNames = append(flagNames, name)
+ }
+ })
+ if !sort.StringsAreSorted(flagNames) {
+ t.Errorf("flag names not sorted: %v", flagNames)
+ }
+}
+
+func TestGet(t *testing.T) {
+ ResetForTesting(nil)
+ Bool([]string{"test_bool"}, true, "bool value")
+ Int([]string{"test_int"}, 1, "int value")
+ Int64([]string{"test_int64"}, 2, "int64 value")
+ Uint([]string{"test_uint"}, 3, "uint value")
+ Uint64([]string{"test_uint64"}, 4, "uint64 value")
+ String([]string{"test_string"}, "5", "string value")
+ Float64([]string{"test_float64"}, 6, "float64 value")
+ Duration([]string{"test_duration"}, 7, "time.Duration value")
+
+ visitor := func(f *Flag) {
+ for _, name := range f.Names {
+ if len(name) > 5 && name[0:5] == "test_" {
+ g, ok := f.Value.(Getter)
+ if !ok {
+ t.Errorf("Visit: value does not satisfy Getter: %T", f.Value)
+ return
+ }
+ switch name {
+ case "test_bool":
+ ok = g.Get() == true
+ case "test_int":
+ ok = g.Get() == int(1)
+ case "test_int64":
+ ok = g.Get() == int64(2)
+ case "test_uint":
+ ok = g.Get() == uint(3)
+ case "test_uint64":
+ ok = g.Get() == uint64(4)
+ case "test_string":
+ ok = g.Get() == "5"
+ case "test_float64":
+ ok = g.Get() == float64(6)
+ case "test_duration":
+ ok = g.Get() == time.Duration(7)
+ }
+ if !ok {
+ t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name)
+ }
+ }
+ }
+ }
+ VisitAll(visitor)
+}
+
+func testParse(f *FlagSet, t *testing.T) {
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ boolFlag := f.Bool([]string{"bool"}, false, "bool value")
+ bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value")
+ f.Bool([]string{"bool3"}, false, "bool3 value")
+ bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value")
+ intFlag := f.Int([]string{"-int"}, 0, "int value")
+ int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value")
+ uintFlag := f.Uint([]string{"uint"}, 0, "uint value")
+ uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value")
+ stringFlag := f.String([]string{"string"}, "0", "string value")
+ f.String([]string{"string2"}, "0", "string2 value")
+ singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value")
+ doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value")
+ mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value")
+ mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value")
+ nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value")
+ nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value")
+ float64Flag := f.Float64([]string{"float64"}, 0, "float64 value")
+ durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value")
+ extra := "one-extra-argument"
+ args := []string{
+ "-bool",
+ "-bool2=true",
+ "-bool4=false",
+ "--int", "22",
+ "--int64", "0x23",
+ "-uint", "24",
+ "--uint64", "25",
+ "-string", "hello",
+ "-squote='single'",
+ `-dquote="double"`,
+ `-mquote='mixed"`,
+ `-mquote2="mixed2'`,
+ `-nquote="'single nested'"`,
+ `-nquote2='"double nested"'`,
+ "-float64", "2718e28",
+ "-duration", "2m",
+ extra,
+ }
+ if err := f.Parse(args); err != nil {
+ t.Fatal(err)
+ }
+ if !f.Parsed() {
+ t.Error("f.Parse() = false after Parse")
+ }
+ if *boolFlag != true {
+ t.Error("bool flag should be true, is ", *boolFlag)
+ }
+ if *bool2Flag != true {
+ t.Error("bool2 flag should be true, is ", *bool2Flag)
+ }
+ if !f.IsSet("bool2") {
+ t.Error("bool2 should be marked as set")
+ }
+ if f.IsSet("bool3") {
+ t.Error("bool3 should not be marked as set")
+ }
+ if !f.IsSet("bool4") {
+ t.Error("bool4 should be marked as set")
+ }
+ if *bool4Flag != false {
+ t.Error("bool4 flag should be false, is ", *bool4Flag)
+ }
+ if *intFlag != 22 {
+ t.Error("int flag should be 22, is ", *intFlag)
+ }
+ if *int64Flag != 0x23 {
+ t.Error("int64 flag should be 0x23, is ", *int64Flag)
+ }
+ if *uintFlag != 24 {
+ t.Error("uint flag should be 24, is ", *uintFlag)
+ }
+ if *uint64Flag != 25 {
+ t.Error("uint64 flag should be 25, is ", *uint64Flag)
+ }
+ if *stringFlag != "hello" {
+ t.Error("string flag should be `hello`, is ", *stringFlag)
+ }
+ if !f.IsSet("string") {
+ t.Error("string flag should be marked as set")
+ }
+ if f.IsSet("string2") {
+ t.Error("string2 flag should not be marked as set")
+ }
+ if *singleQuoteFlag != "single" {
+ t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag)
+ }
+ if *doubleQuoteFlag != "double" {
+ t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag)
+ }
+ if *mixedQuoteFlag != `'mixed"` {
+ t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag)
+ }
+ if *mixed2QuoteFlag != `"mixed2'` {
+ t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag)
+ }
+ if *nestedQuoteFlag != "'single nested'" {
+ t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag)
+ }
+ if *nested2QuoteFlag != `"double nested"` {
+ t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag)
+ }
+ if *float64Flag != 2718e28 {
+ t.Error("float64 flag should be 2718e28, is ", *float64Flag)
+ }
+ if *durationFlag != 2*time.Minute {
+ t.Error("duration flag should be 2m, is ", *durationFlag)
+ }
+ if len(f.Args()) != 1 {
+ t.Error("expected one argument, got", len(f.Args()))
+ } else if f.Args()[0] != extra {
+ t.Errorf("expected argument %q got %q", extra, f.Args()[0])
+ }
+}
+
+func testPanic(f *FlagSet, t *testing.T) {
+ f.Int([]string{"-int"}, 0, "int value")
+ if f.Parsed() {
+ t.Error("f.Parse() = true before Parse")
+ }
+ args := []string{
+ "-int", "21",
+ }
+ f.Parse(args)
+}
+
+func TestParsePanic(t *testing.T) {
+ ResetForTesting(func() {})
+ testPanic(CommandLine, t)
+}
+
+func TestParse(t *testing.T) {
+ ResetForTesting(func() { t.Error("bad parse") })
+ testParse(CommandLine, t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+ testParse(NewFlagSet("test", ContinueOnError), t)
+}
+
+// Declare a user-defined flag type.
+type flagVar []string
+
+func (f *flagVar) String() string {
+ return fmt.Sprint([]string(*f))
+}
+
+func (f *flagVar) Set(value string) error {
+ *f = append(*f, value)
+ return nil
+}
+
+func TestUserDefined(t *testing.T) {
+ var flags FlagSet
+ flags.Init("test", ContinueOnError)
+ var v flagVar
+ flags.Var(&v, []string{"v"}, "usage")
+ if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil {
+ t.Error(err)
+ }
+ if len(v) != 3 {
+ t.Fatal("expected 3 args; got ", len(v))
+ }
+ expect := "[1 2 3]"
+ if v.String() != expect {
+ t.Errorf("expected value %q got %q", expect, v.String())
+ }
+}
+
+// Declare a user-defined boolean flag type.
+type boolFlagVar struct {
+ count int
+}
+
+func (b *boolFlagVar) String() string {
+ return fmt.Sprintf("%d", b.count)
+}
+
+func (b *boolFlagVar) Set(value string) error {
+ if value == "true" {
+ b.count++
+ }
+ return nil
+}
+
+func (b *boolFlagVar) IsBoolFlag() bool {
+ return b.count < 4
+}
+
+func TestUserDefinedBool(t *testing.T) {
+ var flags FlagSet
+ flags.Init("test", ContinueOnError)
+ var b boolFlagVar
+ var err error
+ flags.Var(&b, []string{"b"}, "usage")
+ if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil {
+ if b.count < 4 {
+ t.Error(err)
+ }
+ }
+
+ if b.count != 4 {
+ t.Errorf("want: %d; got: %d", 4, b.count)
+ }
+
+ if err == nil {
+ t.Error("expected error; got none")
+ }
+}
+
+func TestSetOutput(t *testing.T) {
+ var flags FlagSet
+ var buf bytes.Buffer
+ flags.SetOutput(&buf)
+ flags.Init("test", ContinueOnError)
+ flags.Parse([]string{"-unknown"})
+ if out := buf.String(); !strings.Contains(out, "-unknown") {
+ t.Errorf("expected output mentioning unknown; got %q", out)
+ }
+}
+
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
+func TestChangingArgs(t *testing.T) {
+ ResetForTesting(func() { t.Fatal("bad parse") })
+ oldArgs := os.Args
+ defer func() { os.Args = oldArgs }()
+ os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"}
+ before := Bool([]string{"before"}, false, "")
+ if err := CommandLine.Parse(os.Args[1:]); err != nil {
+ t.Fatal(err)
+ }
+ cmd := Arg(0)
+ os.Args = Args()
+ after := Bool([]string{"after"}, false, "")
+ Parse()
+ args := Args()
+
+ if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" {
+ t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args)
+ }
+}
+
+// Test that -help invokes the usage message and returns ErrHelp.
+func TestHelp(t *testing.T) {
+ var helpCalled = false
+ fs := NewFlagSet("help test", ContinueOnError)
+ fs.Usage = func() { helpCalled = true }
+ var flag bool
+ fs.BoolVar(&flag, []string{"flag"}, false, "regular flag")
+ // Regular flag invocation should work
+ err := fs.Parse([]string{"-flag=true"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+ if !flag {
+ t.Error("flag was not set by -flag")
+ }
+ if helpCalled {
+ t.Error("help called for regular flag")
+ helpCalled = false // reset for next test
+ }
+ // Help flag should work as expected.
+ err = fs.Parse([]string{"-help"})
+ if err == nil {
+ t.Fatal("error expected")
+ }
+ if err != ErrHelp {
+ t.Fatal("expected ErrHelp; got ", err)
+ }
+ if !helpCalled {
+ t.Fatal("help was not called")
+ }
+ // If we define a help flag, that should override.
+ var help bool
+ fs.BoolVar(&help, []string{"help"}, false, "help flag")
+ helpCalled = false
+ err = fs.Parse([]string{"-help"})
+ if err != nil {
+ t.Fatal("expected no error for defined -help; got ", err)
+ }
+ if helpCalled {
+ t.Fatal("help was called; should not have been for defined help flag")
+ }
+}
+
+// Test the flag count functions.
+func TestFlagCounts(t *testing.T) {
+ fs := NewFlagSet("help test", ContinueOnError)
+ var flag bool
+ fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag")
+ fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag")
+
+ if fs.FlagCount() != 6 {
+ t.Fatal("FlagCount wrong. ", fs.FlagCount())
+ }
+ if fs.FlagCountUndeprecated() != 4 {
+ t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated())
+ }
+ if fs.NFlag() != 0 {
+ t.Fatal("NFlag wrong. ", fs.NFlag())
+ }
+ err := fs.Parse([]string{"-fd", "-g", "-flag4"})
+ if err != nil {
+ t.Fatal("expected no error for defined -help; got ", err)
+ }
+ if fs.NFlag() != 4 {
+ t.Fatal("NFlag wrong. ", fs.NFlag())
+ }
+}
+
+// Regression test for a bug in sortFlags.
+func TestSortFlags(t *testing.T) {
+ fs := NewFlagSet("help TestSortFlags", ContinueOnError)
+
+ var err error
+
+ var b bool
+ fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage")
+
+ err = fs.Parse([]string{"--banana=true"})
+ if err != nil {
+ t.Fatal("expected no error; got ", err)
+ }
+
+ count := 0
+
+ fs.VisitAll(func(flag *Flag) {
+ count++
+ if flag == nil {
+ t.Fatal("VisitAll should not return a nil flag")
+ }
+ })
+ flagcount := fs.FlagCount()
+ if flagcount != count {
+ t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count)
+ }
+ // Make sure it's idempotent
+ if flagcount != fs.FlagCount() {
+ t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount())
+ }
+
+ count = 0
+ fs.Visit(func(flag *Flag) {
+ count++
+ if flag == nil {
+ t.Fatal("Visit should not return a nil flag")
+ }
+ })
+ nflag := fs.NFlag()
+ if nflag != count {
+ t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count)
+ }
+ if nflag != fs.NFlag() {
+ t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag())
+ }
+}
+
+func TestMergeFlags(t *testing.T) {
+ base := NewFlagSet("base", ContinueOnError)
+ base.String([]string{"f"}, "", "")
+
+ fs := NewFlagSet("test", ContinueOnError)
+ Merge(fs, base)
+ if len(fs.formal) != 1 {
+ t.Fatalf("FlagCount (%d) != number (1) of elements merged", len(fs.formal))
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go
new file mode 100644
index 00000000000..607dbed43a0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags.go
@@ -0,0 +1,149 @@
+package mount
+
+import (
+ "fmt"
+ "strings"
+)
+
+var flags = map[string]struct {
+ clear bool
+ flag int
+}{
+ "defaults": {false, 0},
+ "ro": {false, RDONLY},
+ "rw": {true, RDONLY},
+ "suid": {true, NOSUID},
+ "nosuid": {false, NOSUID},
+ "dev": {true, NODEV},
+ "nodev": {false, NODEV},
+ "exec": {true, NOEXEC},
+ "noexec": {false, NOEXEC},
+ "sync": {false, SYNCHRONOUS},
+ "async": {true, SYNCHRONOUS},
+ "dirsync": {false, DIRSYNC},
+ "remount": {false, REMOUNT},
+ "mand": {false, MANDLOCK},
+ "nomand": {true, MANDLOCK},
+ "atime": {true, NOATIME},
+ "noatime": {false, NOATIME},
+ "diratime": {true, NODIRATIME},
+ "nodiratime": {false, NODIRATIME},
+ "bind": {false, BIND},
+ "rbind": {false, RBIND},
+ "unbindable": {false, UNBINDABLE},
+ "runbindable": {false, RUNBINDABLE},
+ "private": {false, PRIVATE},
+ "rprivate": {false, RPRIVATE},
+ "shared": {false, SHARED},
+ "rshared": {false, RSHARED},
+ "slave": {false, SLAVE},
+ "rslave": {false, RSLAVE},
+ "relatime": {false, RELATIME},
+ "norelatime": {true, RELATIME},
+ "strictatime": {false, STRICTATIME},
+ "nostrictatime": {true, STRICTATIME},
+}
+
+var validFlags = map[string]bool{
+ "": true,
+ "size": true,
+ "mode": true,
+ "uid": true,
+ "gid": true,
+ "nr_inodes": true,
+ "nr_blocks": true,
+ "mpol": true,
+}
+
+var propagationFlags = map[string]bool{
+ "bind": true,
+ "rbind": true,
+ "unbindable": true,
+ "runbindable": true,
+ "private": true,
+ "rprivate": true,
+ "shared": true,
+ "rshared": true,
+ "slave": true,
+ "rslave": true,
+}
+
+// MergeTmpfsOptions merges mount options to make sure there are no duplicates.
+func MergeTmpfsOptions(options []string) ([]string, error) {
+ // We use collision maps to remove duplicates.
+ // For a flag, the key is the flag value (the key for a propagation flag is -1).
+ // For a data=value option, the key is the data key.
+ flagCollisions := map[int]bool{}
+ dataCollisions := map[string]bool{}
+
+ var newOptions []string
+ // We process in reverse order
+ for i := len(options) - 1; i >= 0; i-- {
+ option := options[i]
+ if option == "defaults" {
+ continue
+ }
+ if f, ok := flags[option]; ok && f.flag != 0 {
+ // There is only one propagation mode
+ key := f.flag
+ if propagationFlags[option] {
+ key = -1
+ }
+ // Check to see if there is collision for flag
+ if !flagCollisions[key] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ flagCollisions[key] = true
+ }
+ continue
+ }
+ opt := strings.SplitN(option, "=", 2)
+ if len(opt) != 2 || !validFlags[opt[0]] {
+ return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ if !dataCollisions[opt[0]] {
+ // We prepend the option and add to collision map
+ newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true
+ }
+ }
+
+ return newOptions, nil
+}
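+
+// For illustration (this mirrors TestMergeTmpfsOptions): later options win
+// and "defaults" entries are dropped, so
+//
+//	MergeTmpfsOptions([]string{"noatime", "ro", "size=10k", "atime", "rw", "size=1024k"})
+//
+// returns []string{"atime", "rw", "size=1024k"}.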
+
+// Parse fstab-style mount options into mount() flags
+// and device-specific data.
+func parseOptions(options string) (int, string) {
+ var (
+ flag int
+ data []string
+ )
+
+ for _, o := range strings.Split(options, ",") {
+ // If the option does not exist in the flags table or the flag
+ // is not supported on the platform,
+ // then it is a data value for a specific fs type
+ if f, exists := flags[o]; exists && f.flag != 0 {
+ if f.clear {
+ flag &= ^f.flag
+ } else {
+ flag |= f.flag
+ }
+ } else {
+ data = append(data, o)
+ }
+ }
+ return flag, strings.Join(data, ",")
+}
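+
+// For illustration (this mirrors TestMountOptionsParsing):
+//
+//	flag, data := parseOptions("noatime,ro,size=10k")
+//	// flag == NOATIME|RDONLY, data == "size=10k"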
+
+// ParseTmpfsOptions parses fstab-style mount options into flags and data.
+func ParseTmpfsOptions(options string) (int, string, error) {
+ flags, data := parseOptions(options)
+ for _, o := range strings.Split(data, ",") {
+ opt := strings.SplitN(o, "=", 2)
+ if !validFlags[opt[0]] {
+ return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
+ }
+ }
+ return flags, data, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
new file mode 100644
index 00000000000..f166cb2f778
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
@@ -0,0 +1,48 @@
+// +build freebsd,cgo
+
+package mount
+
+/*
+#include <sys/mount.h>
+*/
+import "C"
+
+const (
+ // RDONLY will mount the filesystem as read-only.
+ RDONLY = C.MNT_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = C.MNT_NOSUID
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = C.MNT_NOEXEC
+
+ // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
+ SYNCHRONOUS = C.MNT_SYNCHRONOUS
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = C.MNT_NOATIME
+)
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NODEV = 0
+ NODIRATIME = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIVE = 0
+ RELATIME = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
new file mode 100644
index 00000000000..dc696dce907
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go
@@ -0,0 +1,85 @@
+package mount
+
+import (
+ "syscall"
+)
+
+const (
+ // RDONLY will mount the file system read-only.
+ RDONLY = syscall.MS_RDONLY
+
+ // NOSUID will not allow set-user-identifier or set-group-identifier bits to
+ // take effect.
+ NOSUID = syscall.MS_NOSUID
+
+ // NODEV will not interpret character or block special devices on the file
+ // system.
+ NODEV = syscall.MS_NODEV
+
+ // NOEXEC will not allow execution of any binaries on the mounted file system.
+ NOEXEC = syscall.MS_NOEXEC
+
+ // SYNCHRONOUS will allow I/O to the file system to be done synchronously.
+ SYNCHRONOUS = syscall.MS_SYNCHRONOUS
+
+ // DIRSYNC will force all directory updates within the file system to be done
+ // synchronously. This affects the following system calls: create, link,
+ // unlink, symlink, mkdir, rmdir, mknod and rename.
+ DIRSYNC = syscall.MS_DIRSYNC
+
+ // REMOUNT will attempt to remount an already-mounted file system. This is
+ // commonly used to change the mount flags for a file system, especially to
+ // make a readonly file system writeable. It does not change device or mount
+ // point.
+ REMOUNT = syscall.MS_REMOUNT
+
+ // MANDLOCK will force mandatory locks on a filesystem.
+ MANDLOCK = syscall.MS_MANDLOCK
+
+ // NOATIME will not update the file access time when reading from a file.
+ NOATIME = syscall.MS_NOATIME
+
+ // NODIRATIME will not update the directory access time.
+ NODIRATIME = syscall.MS_NODIRATIME
+
+ // BIND remounts a subtree somewhere else.
+ BIND = syscall.MS_BIND
+
+ // RBIND remounts a subtree and all possible submounts somewhere else.
+ RBIND = syscall.MS_BIND | syscall.MS_REC
+
+ // UNBINDABLE creates a mount which cannot be cloned through a bind operation.
+ UNBINDABLE = syscall.MS_UNBINDABLE
+
+ // RUNBINDABLE marks the entire mount tree as UNBINDABLE.
+ RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
+
+ // PRIVATE creates a mount which carries no propagation abilities.
+ PRIVATE = syscall.MS_PRIVATE
+
+ // RPRIVATE marks the entire mount tree as PRIVATE.
+ RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
+
+ // SLAVE creates a mount which receives propagation from its master, but not
+ // vice versa.
+ SLAVE = syscall.MS_SLAVE
+
+ // RSLAVE marks the entire mount tree as SLAVE.
+ RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
+
+ // SHARED creates a mount which provides the ability to create mirrors of
+ // that mount such that mounts and unmounts within any of the mirrors
+ // propagate to the other mirrors.
+ SHARED = syscall.MS_SHARED
+
+ // RSHARED marks the entire mount tree as SHARED.
+ RSHARED = syscall.MS_SHARED | syscall.MS_REC
+
+ // RELATIME updates inode access times relative to modify or change time.
+ RELATIME = syscall.MS_RELATIME
+
+ // STRICTATIME allows explicitly requesting full atime updates. This makes
+ // it possible for the kernel to default to relatime or noatime but still
+ // allow userspace to override it.
+ STRICTATIME = syscall.MS_STRICTATIME
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
new file mode 100644
index 00000000000..5564f7b3cde
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
@@ -0,0 +1,30 @@
+// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+
+package mount
+
+// These flags are unsupported.
+const (
+ BIND = 0
+ DIRSYNC = 0
+ MANDLOCK = 0
+ NOATIME = 0
+ NODEV = 0
+ NODIRATIME = 0
+ NOEXEC = 0
+ NOSUID = 0
+ UNBINDABLE = 0
+ RUNBINDABLE = 0
+ PRIVATE = 0
+ RPRIVATE = 0
+ SHARED = 0
+ RSHARED = 0
+ SLAVE = 0
+ RSLAVE = 0
+ RBIND = 0
+ RELATIME = 0
+ RELATIVE = 0
+ REMOUNT = 0
+ STRICTATIME = 0
+ SYNCHRONOUS = 0
+ RDONLY = 0
+)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
new file mode 100644
index 00000000000..66ac4bf4723
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+ "time"
+)
+
+// GetMounts retrieves a list of mounts for the current running process.
+func GetMounts() ([]*Info, error) {
+ return parseMountTable()
+}
+
+// Mounted determines if a specified mountpoint has been mounted.
+// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+func Mounted(mountpoint string) (bool, error) {
+ entries, err := parseMountTable()
+ if err != nil {
+ return false, err
+ }
+
+ // Search the table for the mountpoint
+ for _, e := range entries {
+ if e.Mountpoint == mountpoint {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+// Mount will mount the filesystem according to the specified configuration, on
+// condition that the target path is *not* already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func Mount(device, target, mType, options string) error {
+ flag, _ := parseOptions(options)
+ if flag&REMOUNT != REMOUNT {
+ if mounted, err := Mounted(target); err != nil || mounted {
+ return err
+ }
+ }
+ return ForceMount(device, target, mType, options)
+}
+
+// ForceMount will mount a filesystem according to the specified configuration,
+// *regardless* of whether the target path is already mounted. Options must be
+// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
+// flags.go for supported option flags.
+func ForceMount(device, target, mType, options string) error {
+ flag, data := parseOptions(options)
+ if err := mount(device, target, mType, uintptr(flag), data); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Unmount will unmount the target filesystem, so long as it is mounted.
+func Unmount(target string) error {
+ if mounted, err := Mounted(target); err != nil || !mounted {
+ return err
+ }
+ return ForceUnmount(target)
+}
+
+// ForceUnmount will force an unmount of the target filesystem, regardless of
+// whether it is currently mounted.
+func ForceUnmount(target string) (err error) {
+ // Simple retry logic for unmount
+ for i := 0; i < 10; i++ {
+ if err = unmount(target, 0); err == nil {
+ return nil
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ return
+}
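+
+// A minimal read-only bind mount, for illustration only (the paths are
+// hypothetical and the calls need sufficient privileges):
+//
+//	if err := Mount("/src", "/dst", "none", "bind,ro"); err != nil {
+//		// handle the error
+//	}
+//	defer Unmount("/dst")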
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go
new file mode 100644
index 00000000000..90fa348b227
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mount_unix_test.go
@@ -0,0 +1,162 @@
+// +build !windows
+
+package mount
+
+import (
+ "os"
+ "path"
+ "testing"
+)
+
+func TestMountOptionsParsing(t *testing.T) {
+ options := "noatime,ro,size=10k"
+
+ flag, data := parseOptions(options)
+
+ if data != "size=10k" {
+ t.Fatalf("Expected size=10 got %s", data)
+ }
+
+ expectedFlag := NOATIME | RDONLY
+
+ if flag != expectedFlag {
+ t.Fatalf("Expected %d got %d", expectedFlag, flag)
+ }
+}
+
+func TestMounted(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
+ )
+
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
+ f, err := os.Create(sourcePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("hello")
+ f.Close()
+
+ f, err = os.Create(targetPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ mounted, err := Mounted(targetDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !mounted {
+ t.Fatalf("Expected %s to be mounted", targetDir)
+ }
+ if _, err := os.Stat(targetDir); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestMountReadonly(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ sourcePath = path.Join(sourceDir, "file.txt")
+ targetPath = path.Join(targetDir, "file.txt")
+ )
+
+ os.Mkdir(sourceDir, 0777)
+ os.Mkdir(targetDir, 0777)
+
+ f, err := os.Create(sourcePath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.WriteString("hello")
+ f.Close()
+
+ f, err = os.Create(targetPath)
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ f, err = os.OpenFile(targetPath, os.O_RDWR, 0777)
+ if err == nil {
+ t.Fatal("Should not be able to open a ro file as rw")
+ }
+}
+
+func TestGetMounts(t *testing.T) {
+ mounts, err := GetMounts()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ root := false
+ for _, entry := range mounts {
+ if entry.Mountpoint == "/" {
+ root = true
+ }
+ }
+
+ if !root {
+ t.Fatal("/ should be mounted at least")
+ }
+}
+
+func TestMergeTmpfsOptions(t *testing.T) {
+ options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"}
+ expected := []string{"atime", "rw", "size=1024k", "slave"}
+ merged, err := MergeTmpfsOptions(options)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(expected) != len(merged) {
+ t.Fatalf("Expected %s got %s", expected, merged)
+ }
+ for index := range merged {
+ if merged[index] != expected[index] {
+ t.Fatalf("Expected %s for the %dth option, got %s", expected, index, merged)
+ }
+ }
+
+ options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"}
+ _, err = MergeTmpfsOptions(options)
+ if err == nil {
+ t.Fatal("Expected error got nil")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
new file mode 100644
index 00000000000..bb870e6f59b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,59 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "syscall"
+ "unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+ out := make([]C.struct_iovec, len(options))
+ for i, option := range options {
+ out[i].iov_base = unsafe.Pointer(C.CString(option))
+ out[i].iov_len = C.size_t(len(option) + 1)
+ }
+ return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ isNullFS := false
+
+ xs := strings.Split(data, ",")
+ for _, x := range xs {
+ if x == "bind" {
+ isNullFS = true
+ }
+ }
+
+ options := []string{"fspath", target}
+ if isNullFS {
+ options = append(options, "fstype", "nullfs", "target", device)
+ } else {
+ options = append(options, "fstype", mType, "from", device)
+ }
+ rawOptions := allocateIOVecs(options)
+ for _, rawOption := range rawOptions {
+ defer C.free(rawOption.iov_base)
+ }
+
+ if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+ reason := C.GoString(C.strerror(*C.__error()))
+ return fmt.Errorf("Failed to call nmount: %s", reason)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
new file mode 100644
index 00000000000..dd4280c7778
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
@@ -0,0 +1,21 @@
+package mount
+
+import (
+ "syscall"
+)
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ if err := syscall.Mount(device, target, mType, flag, data); err != nil {
+ return err
+ }
+
+ // A read-only bind mount needs a second pass: the kernel ignores
+ // MS_RDONLY on the initial MS_BIND call, so remount the target with
+ // MS_REMOUNT to actually apply the read-only flag.
+ if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
+ return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
+ }
+ return nil
+}
+
+func unmount(target string, flag int) error {
+ return syscall.Unmount(target, flag)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
new file mode 100644
index 00000000000..c684aa81fcc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go
@@ -0,0 +1,33 @@
+// +build solaris,cgo
+
+package mount
+
+import (
+ "golang.org/x/sys/unix"
+ "unsafe"
+)
+
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <sys/mount.h>
+// int Mount(const char *spec, const char *dir, int mflag,
+// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
+// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
+// }
+import "C"
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ spec := C.CString(device)
+ dir := C.CString(target)
+ fstype := C.CString(mType)
+ _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
+ C.free(unsafe.Pointer(spec))
+ C.free(unsafe.Pointer(dir))
+ C.free(unsafe.Pointer(fstype))
+ return err
+}
+
+func unmount(target string, flag int) error {
+ err := unix.Unmount(target, flag)
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
new file mode 100644
index 00000000000..a2a3bb457fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+ panic("Not implemented")
+}
+
+func unmount(target string, flag int) error {
+ panic("Not implemented")
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
new file mode 100644
index 00000000000..e3fc3535e93
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+ // ID is a unique identifier of the mount (may be reused after umount).
+ ID int
+
+ // Parent indicates the ID of the mount parent (or of self for the top of the
+ // mount tree).
+ Parent int
+
+ // Major indicates one half of the device ID which identifies the device class.
+ Major int
+
+ // Minor indicates one half of the device ID which identifies a specific
+ // instance of device.
+ Minor int
+
+ // Root of the mount within the filesystem.
+ Root string
+
+ // Mountpoint indicates the mount point relative to the process's root.
+ Mountpoint string
+
+ // Opts represents mount-specific options.
+ Opts string
+
+ // Optional represents optional fields.
+ Optional string
+
+ // Fstype indicates the type of filesystem, such as EXT3.
+ Fstype string
+
+ // Source indicates filesystem specific information or "none".
+ Source string
+
+ // VfsOpts represents per super block options.
+ VfsOpts string
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
new file mode 100644
index 00000000000..4f32edcd906
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -0,0 +1,41 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+)
+
+// Retrieve the mount table via getmntinfo(3); FreeBSD has no
+// /proc/self/mountinfo, so the statfs entries are converted directly.
+func parseMountTable() ([]*Info, error) {
+ var rawEntries *C.struct_statfs
+
+ count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+ if count == 0 {
+ return nil, fmt.Errorf("Failed to call getmntinfo")
+ }
+
+ var entries []C.struct_statfs
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+ header.Cap = count
+ header.Len = count
+ header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+ var out []*Info
+ for _, entry := range entries {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+ mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+ mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+ out = append(out, &mountinfo)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
new file mode 100644
index 00000000000..be69fee1d7b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -0,0 +1,95 @@
+// +build linux
+
+package mount
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+const (
+ /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+ (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+
+ (1) mount ID: unique identifier of the mount (may be reused after umount)
+ (2) parent ID: ID of parent (or of self for the top of the mount tree)
+ (3) major:minor: value of st_dev for files on filesystem
+ (4) root: root of the mount within the filesystem
+ (5) mount point: mount point relative to the process's root
+ (6) mount options: per mount options
+ (7) optional fields: zero or more fields of the form "tag[:value]"
+ (8) separator: marks the end of the optional fields
+ (9) filesystem type: name of filesystem of the form "type[.subtype]"
+ (10) mount source: filesystem specific information or "none"
+ (11) super options: per super block options*/
+ mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
+// bind mounts
+func parseMountTable() ([]*Info, error) {
+ f, err := os.Open("/proc/self/mountinfo")
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]*Info, error) {
+ var (
+ s = bufio.NewScanner(r)
+ out = []*Info{}
+ )
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ var (
+ p = &Info{}
+ text = s.Text()
+ optionalFields string
+ )
+
+ if _, err := fmt.Sscanf(text, mountinfoFormat,
+ &p.ID, &p.Parent, &p.Major, &p.Minor,
+ &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
+ return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+ }
+ // Safe as mountinfo encodes mountpoints with spaces as \040.
+ index := strings.Index(text, " - ")
+ postSeparatorFields := strings.Fields(text[index+3:])
+ if len(postSeparatorFields) < 3 {
+ return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+ }
+
+ if optionalFields != "-" {
+ p.Optional = optionalFields
+ }
+
+ p.Fstype = postSeparatorFields[0]
+ p.Source = postSeparatorFields[1]
+ p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
+ out = append(out, p)
+ }
+ return out, nil
+}
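+
+// For illustration, given the sample mountinfo line from the comment above,
+//
+//	36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+//
+// parseInfoFile yields Info{ID: 36, Parent: 35, Major: 98, Minor: 0,
+// Root: "/mnt1", Mountpoint: "/mnt2", Opts: "rw,noatime",
+// Optional: "master:1", Fstype: "ext3", Source: "/dev/root",
+// VfsOpts: "rw,errors=continue"}.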
+
+// PidMountInfo collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `GetMounts` which will inspect
+// "/proc/self/mountinfo" instead.
+func PidMountInfo(pid int) ([]*Info, error) {
+ f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseInfoFile(f)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go
new file mode 100644
index 00000000000..bd100e1d494
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux_test.go
@@ -0,0 +1,476 @@
+// +build linux
+
+package mount
+
+import (
+ "bytes"
+ "testing"
+)
+
+const (
+ fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
+ 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
+ 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
+ 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
+ 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
+ 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
+ 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
+ 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
+ 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
+ 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
+ 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
+ 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
+ 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
+ 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
+ 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
+ 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
+ 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
+ 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
+ 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
+ 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
+ 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
+ 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
+ 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
+ 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
+ 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
+ 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
+ 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
+ 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
+ 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
+ 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
+ 46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
+ 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
+ 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
+ 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
+ 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
+ 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
+ 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
+ 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
+ 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
+ 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
+ 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
+ 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
+ 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
+ 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
+ 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
+ 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
+ 207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
+ 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
+ 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
+ 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
+ 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
+ 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
+ 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
+ 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
+ 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
+ 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
+ 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
+ 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
+
+ ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755
+18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
+19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755
+20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered
+21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755
+22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
+23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
+24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
+25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
+26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children
+27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
+28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
+29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
+30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw
+31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
+32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
+33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
+34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
+35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
+36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
+37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb
+38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
+39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525
+40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525
+41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525
+42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525
+43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525
+44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525
+45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525
+46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525
+47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525
+48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525
+49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525
+50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525
+51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525
+52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525
+53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525
+54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525
+55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525
+56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525
+57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525
+58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525
+59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525
+60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525
+61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525
+62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525
+63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525
+64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525
+65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525
+66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525
+67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525
+68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525
+69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525
+70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525
+71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525
+72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525
+73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525
+74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525
+75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525
+76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525
+77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525
+78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525
+79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525
+80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525
+81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525
+82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525
+83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525
+84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525
+85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525
+86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525
+87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525
+88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525
+89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525
+90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525
+91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525
+92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525
+93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525
+94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525
+95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525
+96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525
+97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525
+98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525
+99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525
+100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525
+101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525
+102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525
+103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525
+104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525
+105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525
+106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525
+107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525
+108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525
+109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525
+110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525
+111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525
+112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525
+113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525
+114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525
+115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525
+116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525
+117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525
+118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525
+119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525
+120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525
+121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525
+122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525
+123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525
+124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525
+125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525
+126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525
+127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525
+128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525
+129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525
+130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525
+131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525
+132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525
+133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525
+134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525
+135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525
+136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525
+137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525
+138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525
+139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525
+140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525
+141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525
+142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525
+143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525
+144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525`
+
+ gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
+16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
+17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755
+18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755
+19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
+20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
+21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw
+22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
+23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw
+24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755
+25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc
+26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children
+27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children
+28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children
+29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children
+30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children
+31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children
+32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children
+33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro
+34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota
+35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw
+36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
+42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw
+43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw
+44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000
+68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c
+86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
+87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
+88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
+89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
+38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c
+39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c
+40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c
+41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c
+45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c
+46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c
+47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c
+48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c
+49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c
+50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c
+51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c
+52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c
+53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c
+54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c
+55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c
+56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c
+57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c
+59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c
+60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c
+61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c
+62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c
+63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c
+64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c
+65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c
+66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c
+70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c
+71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c
+72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c
+73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c
+76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c
+77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c
+78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c
+79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c
+80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c
+81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c
+82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c
+83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c
+84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c
+94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c
+95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c
+96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c
+97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c
+98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c
+102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c
+103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c
+104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c
+105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c
+106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c
+107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c
+108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c
+109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c
+110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c
+111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c
+112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c
+113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c
+114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c
+117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c
+118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c
+119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c
+120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c
+121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c
+122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c
+123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c
+126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c
+127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c
+128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c
+130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c
+131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c
+132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c
+133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c
+134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c
+135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c
+136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c
+137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c
+138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c
+139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c
+140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c
+141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c
+142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c
+143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c
+144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c
+147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c
+150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c
+151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c
+152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c
+153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c
+154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c
+155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c
+156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c
+157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c
+158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c
+159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c
+160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c
+162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c
+163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c
+164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c
+165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c
+166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c
+167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c
+168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c
+169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c
+170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c
+171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c
+172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c
+173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c
+174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c
+184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c
+187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c
+188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c
+189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c
+190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c
+191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c
+192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c
+193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c
+194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c
+195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c
+196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c
+197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c
+198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c
+199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c
+200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c
+201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c
+202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c
+203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c
+204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c
+205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c
+206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c
+207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c
+208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c
+209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c
+210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c
+211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c
+212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c
+213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c
+214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c
+215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c
+216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c
+217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c
+218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c
+219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c
+220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c
+221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c
+222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c
+223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c
+224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c
+225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c
+226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c
+227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c
+228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c
+229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c
+230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c
+231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c
+232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c
+233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c
+234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c
+235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c
+237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c
+238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c
+239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c
+240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c
+241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c
+242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c
+243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c
+244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c
+245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c
+246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c
+247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c
+249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c
+250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c
+251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c
+252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c
+253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c
+254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c
+255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c
+256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c
+257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c
+259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c
+260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c
+261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
+262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
+263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
+264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
+58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
+67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
+265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
+270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
+273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
+278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
+281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
+286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
+289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
+99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
+)
+
+func TestParseFedoraMountinfo(t *testing.T) {
+ r := bytes.NewBuffer([]byte(fedoraMountinfo))
+ _, err := parseInfoFile(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestParseUbuntuMountinfo(t *testing.T) {
+ r := bytes.NewBuffer([]byte(ubuntuMountInfo))
+ _, err := parseInfoFile(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestParseGentooMountinfo(t *testing.T) {
+ r := bytes.NewBuffer([]byte(gentooMountinfo))
+ _, err := parseInfoFile(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestParseFedoraMountinfoFields(t *testing.T) {
+ r := bytes.NewBuffer([]byte(fedoraMountinfo))
+ infos, err := parseInfoFile(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedLength := 58
+ if len(infos) != expectedLength {
+ t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos))
+ }
+ mi := Info{
+ ID: 15,
+ Parent: 35,
+ Major: 0,
+ Minor: 3,
+ Root: "/",
+ Mountpoint: "/proc",
+ Opts: "rw,nosuid,nodev,noexec,relatime",
+ Optional: "shared:5",
+ Fstype: "proc",
+ Source: "proc",
+ VfsOpts: "rw",
+ }
+
+ if *infos[0] != mi {
+ t.Fatalf("expected %#v, got %#v", mi, *infos[0])
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
new file mode 100644
index 00000000000..ad9ab57f8b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+ "fmt"
+)
+
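+// parseMountTable reads the Solaris mount table (MNTTAB, typically
+// /etc/mnttab) via getmntent(3C) and converts each struct mnttab entry
+// into an Info value; fields with no Solaris equivalent are left zero.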
+func parseMountTable() ([]*Info, error) {
+ mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
+ if mnttab == nil {
+ return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+ }
+
+ var out []*Info
+ var mp C.struct_mnttab
+
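+ // getmntent(3C) returns 0 for each entry it reads and a non-zero value
+ // at end-of-file or on error, which terminates the loop below.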
+ ret := C.getmntent(mnttab, &mp)
+ for ret == 0 {
+ var mountinfo Info
+ mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+ mountinfo.Source = C.GoString(mp.mnt_special)
+ mountinfo.Fstype = C.GoString(mp.mnt_fstype)
+ mountinfo.Opts = C.GoString(mp.mnt_mntopts)
+ out = append(out, &mountinfo)
+ ret = C.getmntent(mnttab, &mp)
+ }
+
+ C.fclose(mnttab)
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
new file mode 100644
index 00000000000..7fbcf19214b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
@@ -0,0 +1,12 @@
+// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+ "fmt"
+ "runtime"
+)
+
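+// parseMountTable is the stub for platforms without a native
+// implementation (including freebsd/solaris builds without cgo, per the
+// build tag above); it always reports an error.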
+func parseMountTable() ([]*Info, error) {
+ return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
new file mode 100644
index 00000000000..dab8a37ed01
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
@@ -0,0 +1,6 @@
+package mount
+
+func parseMountTable() ([]*Info, error) {
+ // Do NOT return an error: unlike the unsupported stub, Windows
+ // deliberately reports an empty mount table so that callers can treat
+ // "no mounts" as a valid result.
+ return nil, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
new file mode 100644
index 00000000000..8ceec84bc6c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
@@ -0,0 +1,69 @@
+// +build linux
+
+package mount
+
+// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
+// See the supported options in flags.go for further reference.
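+//
+// A minimal usage sketch (the mount point here is illustrative):
+//
+//	if err := mount.MakeShared("/mnt/data"); err != nil {
+//		// handle the error
+//	}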
+func MakeShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "shared")
+}
+
+// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRShared(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rshared")
+}
+
+// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakePrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "private")
+}
+
+// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeRPrivate(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rprivate")
+}
+
+// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "slave")
+}
+
+// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
+// See the supported options in flags.go for further reference.
+func MakeRSlave(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "rslave")
+}
+
+// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
+// enabled. See the supported options in flags.go for further reference.
+func MakeUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "unbindable")
+}
+
+// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
+// option enabled. See the supported options in flags.go for further reference.
+func MakeRUnbindable(mountPoint string) error {
+ return ensureMountedAs(mountPoint, "runbindable")
+}
+
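+// ensureMountedAs turns mountPoint into a mount point (bind-mounting it
+// onto itself if it is not one already) and then remounts it with the
+// requested propagation option, e.g. "shared" or "rprivate".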
+func ensureMountedAs(mountPoint, options string) error {
+ mounted, err := Mounted(mountPoint)
+ if err != nil {
+ return err
+ }
+
+ if !mounted {
+ if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
+ return err
+ }
+ }
+ if _, err = Mounted(mountPoint); err != nil {
+ return err
+ }
+
+ return ForceMount("", mountPoint, "none", options)
+}
diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go
new file mode 100644
index 00000000000..c1837942e3a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux_test.go
@@ -0,0 +1,331 @@
+// +build linux
+
+package mount
+
+import (
+ "os"
+ "path"
+ "syscall"
+ "testing"
+)
+
+// nothing is propagated in or out
+func TestSubtreePrivate(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ outside1Dir = path.Join(tmp, "outside1")
+ outside2Dir = path.Join(tmp, "outside2")
+
+ outside1Path = path.Join(outside1Dir, "file.txt")
+ outside2Path = path.Join(outside2Dir, "file.txt")
+ outside1CheckPath = path.Join(targetDir, "a", "file.txt")
+ outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
+ )
+ if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(targetDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(outside1Dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(outside2Dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := createFile(outside1Path); err != nil {
+ t.Fatal(err)
+ }
+ if err := createFile(outside2Path); err != nil {
+ t.Fatal(err)
+ }
+
+ // mount the shared directory to a target
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // next, make the target private
+ if err := MakePrivate(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // mount in an outside path to a mounted path inside the _source_
+ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(path.Join(sourceDir, "a")); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // check that this file _does_not_ show in the _target_
+ if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) {
+ t.Fatal(err)
+ } else if err == nil {
+ t.Fatalf("%q should not be visible, but is", outside1CheckPath)
+ }
+
+ // next mount outside2Dir into the _target_
+ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(path.Join(targetDir, "b")); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // check that this file _does_not_ show in the _source_
+ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
+ t.Fatal(err)
+ } else if err == nil {
+ t.Fatalf("%q should not be visible, but is", outside2CheckPath)
+ }
+}
+
+// Testing that when a target is a shared mount,
+// then child mounts propagate to the source
+func TestSubtreeShared(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ outsideDir = path.Join(tmp, "outside")
+
+ outsidePath = path.Join(outsideDir, "file.txt")
+ sourceCheckPath = path.Join(sourceDir, "a", "file.txt")
+ )
+
+ if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(targetDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(outsideDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := createFile(outsidePath); err != nil {
+ t.Fatal(err)
+ }
+
+ // mount the source as shared
+ if err := MakeShared(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // mount the shared directory to a target
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // mount in an outside path to a mounted path inside the target
+ if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(path.Join(targetDir, "a")); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // NOW, check that the file from the outside directory is available in the source directory
+ if _, err := os.Stat(sourceCheckPath); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// testing that mounts to a shared source show up in the slave target,
+// and that mounts into a slave target do _not_ show up in the shared source
+func TestSubtreeSharedSlave(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ outside1Dir = path.Join(tmp, "outside1")
+ outside2Dir = path.Join(tmp, "outside2")
+
+ outside1Path = path.Join(outside1Dir, "file.txt")
+ outside2Path = path.Join(outside2Dir, "file.txt")
+ outside1CheckPath = path.Join(targetDir, "a", "file.txt")
+ outside2CheckPath = path.Join(sourceDir, "b", "file.txt")
+ )
+ if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(targetDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(outside1Dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.Mkdir(outside2Dir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := createFile(outside1Path); err != nil {
+ t.Fatal(err)
+ }
+ if err := createFile(outside2Path); err != nil {
+ t.Fatal(err)
+ }
+
+ // mount the source as shared
+ if err := MakeShared(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // mount the shared directory to a target
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // next, make the target slave
+ if err := MakeSlave(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // mount in an outside path to a mounted path inside the _source_
+ if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(path.Join(sourceDir, "a")); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // check that this file _does_ show in the _target_
+ if _, err := os.Stat(outside1CheckPath); err != nil {
+ t.Fatal(err)
+ }
+
+ // next mount outside2Dir into the _target_
+ if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(path.Join(targetDir, "b")); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // check that this file _does_not_ show in the _source_
+ if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) {
+ t.Fatal(err)
+ } else if err == nil {
+ t.Fatalf("%q should not be visible, but is", outside2CheckPath)
+ }
+}
+
+func TestSubtreeUnbindable(t *testing.T) {
+ tmp := path.Join(os.TempDir(), "mount-tests")
+ if err := os.MkdirAll(tmp, 0777); err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmp)
+
+ var (
+ sourceDir = path.Join(tmp, "source")
+ targetDir = path.Join(tmp, "target")
+ )
+ if err := os.MkdirAll(sourceDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+ if err := os.MkdirAll(targetDir, 0777); err != nil {
+ t.Fatal(err)
+ }
+
+ // next, make the source unbindable
+ if err := MakeUnbindable(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ if err := Unmount(sourceDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ // then attempt to mount it to target. It should fail
+ if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL {
+ t.Fatal(err)
+ } else if err == nil {
+ t.Fatalf("%q should not have been bindable", sourceDir)
+ }
+ defer func() {
+ if err := Unmount(targetDir); err != nil {
+ t.Fatal(err)
+ }
+ }()
+}
+
+func createFile(path string) error {
+ f, err := os.Create(path)
+ if err != nil {
+ return err
+ }
+ f.WriteString("hello world!")
+ return f.Close()
+}
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go
new file mode 100644
index 00000000000..18a939b70b8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go
@@ -0,0 +1,11 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/pkg/namesgenerator"
+)
+
+func main() {
+ fmt.Println(namesgenerator.GetRandomName(0))
+}
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
new file mode 100644
index 00000000000..6109f1b8e49
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
@@ -0,0 +1,552 @@
+package namesgenerator
+
+import (
+ "fmt"
+
+ "github.com/docker/docker/pkg/random"
+)
+
+var (
+ left = [...]string{
+ "admiring",
+ "adoring",
+ "agitated",
+ "amazing",
+ "angry",
+ "awesome",
+ "backstabbing",
+ "berserk",
+ "big",
+ "boring",
+ "clever",
+ "cocky",
+ "compassionate",
+ "condescending",
+ "cranky",
+ "desperate",
+ "determined",
+ "distracted",
+ "dreamy",
+ "drunk",
+ "ecstatic",
+ "elated",
+ "elegant",
+ "evil",
+ "fervent",
+ "focused",
+ "furious",
+ "gigantic",
+ "gloomy",
+ "goofy",
+ "grave",
+ "happy",
+ "high",
+ "hopeful",
+ "hungry",
+ "infallible",
+ "jolly",
+ "jovial",
+ "kickass",
+ "lonely",
+ "loving",
+ "mad",
+ "modest",
+ "naughty",
+ "nauseous",
+ "nostalgic",
+ "peaceful",
+ "pedantic",
+ "pensive",
+ "prickly",
+ "reverent",
+ "romantic",
+ "sad",
+ "serene",
+ "sharp",
+ "sick",
+ "silly",
+ "sleepy",
+ "small",
+ "stoic",
+ "stupefied",
+ "suspicious",
+ "tender",
+ "thirsty",
+ "tiny",
+ "trusting",
+ "zen",
+ }
+
+ // Docker, starting from 0.7.x, generates names from notable scientists and hackers.
+ // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa.
+ right = [...]string{
+ // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB
+ "albattani",
+
+ // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen
+ "allen",
+
+ // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida
+ "almeida",
+
+ // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi
+ "agnesi",
+
+ // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes
+ "archimedes",
+
+ // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli
+ "ardinghelli",
+
+ // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata
+ "aryabhata",
+
+ // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin
+ "austin",
+
+ // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage.
+ "babbage",
+
+ // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach
+ "banach",
+
+ // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen
+ "bardeen",
+
+ // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik
+ "bartik",
+
+ // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi
+ "bassi",
+
+ // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell
+ "bell",
+
+ // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha
+ "bhabha",
+
+ // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus
+ "bhaskara",
+
+ // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell
+ "blackwell",
+
+ // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr.
+ "bohr",
+
+ // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth
+ "booth",
+
+ // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg
+ "borg",
+
+ // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose
+ "bose",
+
+ // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville
+ "boyd",
+
+ // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero
+ "brahmagupta",
+
+ // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain
+ "brattain",
+
+ // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff)
+ "brown",
+
+ // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson
+ "carson",
+
+ // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar
+ "chandrasekhar",
+
+ //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon)
+ "shannon",
+
+ // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden
+ "colden",
+
+ // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori
+ "cori",
+
+ // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray
+ "cray",
+
+ // This entry reflects a husband and wife team who worked together:
+ // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran
+ // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran
+ "curran",
+
+ // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie.
+ "curie",
+
+ // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin.
+ "darwin",
+
+ // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci.
+ "davinci",
+
+ // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra.
+ "dijkstra",
+
+ // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky
+ "dubinsky",
+
+ // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley
+ "easley",
+
+ // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison
+ "edison",
+
+ // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein
+ "einstein",
+
+ // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion
+ "elion",
+
+ // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart
+ "engelbart",
+
+ // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid
+ "euclid",
+
+ // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler
+ "euler",
+
+ // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat
+ "fermat",
+
+ // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi.
+ "fermi",
+
+ // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman
+ "feynman",
+
+ // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod.
+ "franklin",
+
+ // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei
+ "galileo",
+
+ // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates
+ "gates",
+
+ // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist)
+ "goldberg",
+
+ // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine
+ "goldstine",
+
+ // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser
+ "goldwasser",
+
+ // James Golick, all around gangster.
+ "golick",
+
+ // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall
+ "goodall",
+
+ // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist)
+ "hamilton",
+
+ // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking
+ "hawking",
+
+ // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg
+ "heisenberg",
+
+ // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD
+ "heyrovsky",
+
+ // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin
+ "hodgkin",
+
+ // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover
+ "hoover",
+
+ // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper
+ "hopper",
+
+ // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle
+ "hugle",
+
+ // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia
+ "hypatia",
+
+ // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil
+ "jang",
+
+ // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik
+ "jennings",
+
+ // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen
+ "jepsen",
+
+ // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie
+ "joliot",
+
+ // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones
+ "jones",
+
+ // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam
+ "kalam",
+
+ // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare
+ "kare",
+
+ // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller
+ "keller",
+
+ // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana
+ "khorana",
+
+ // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby
+ "kilby",
+
+ // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch
+ "kirch",
+
+ // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth
+ "knuth",
+
+ // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya
+ "kowalevski",
+
+ // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande
+ "lalande",
+
+ // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr
+ "lamarr",
+
+ // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport
+ "lamport",
+
+ // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey
+ "leakey",
+
+ // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt
+ "leavitt",
+
+ // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum
+ "lichterman",
+
+ // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov
+ "liskov",
+
+ // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull)
+ "lovelace",
+
+ // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re
+ "lumiere",
+
+ // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician)
+ "mahavira",
+
+ // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer
+ "mayer",
+
+ // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist)
+ "mccarthy",
+
+ // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock
+ "mcclintock",
+
+ // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean
+ "mclean",
+
+ // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli
+ "mcnulty",
+
+ // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner
+ "meitner",
+
+ // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky
+ "meninsky",
+
+ // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf
+ "mestorf",
+
+ // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky
+ "minsky",
+
+ // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani
+ "mirzakhani",
+
+ // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse
+ "morse",
+
+ // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock
+ "murdock",
+
+ // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton
+ "newton",
+
+ // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform
+ "nightingale",
+
+ // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel
+ "nobel",
+
+ // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether
+ "noether",
+
+ // Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1
+ "northcutt",
+
+ // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce
+ "noyce",
+
+ // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems
+ "panini",
+
+ // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9
+ "pare",
+
+ // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur.
+ "pasteur",
+
+ // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin
+ "payne",
+
+ // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman
+ "perlman",
+
+ // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike
+ "pike",
+
+ // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9
+ "poincare",
+
+ // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras
+ "poitras",
+
+ // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy
+ "ptolemy",
+
+ // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman
+ "raman",
+
+ // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan
+ "ramanujan",
+
+ // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride
+ "ride",
+
+ // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini)
+ "montalcini",
+
+ // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie
+ "ritchie",
+
+ // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen
+ "roentgen",
+
+ // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin
+ "rosalind",
+
+ // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha
+ "saha",
+
+ // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet
+ "sammet",
+
+ // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer)
+ "shaw",
+
+ // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley
+ "shirley",
+
+ // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley
+ "shockley",
+
+ // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi
+ "sinoussi",
+
+ // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton
+ "snyder",
+
+ // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence
+ "spence",
+
+ // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman
+ "stallman",
+
+ // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker
+ "stonebraker",
+
+ // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson
+ "swanson",
+
+ // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz
+ "swartz",
+
+ // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles
+ "swirles",
+
+ // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla
+ "tesla",
+
+ // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson
+ "thompson",
+
+ // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds
+ "torvalds",
+
+ // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing.
+ "turing",
+
+ // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions
+ "varahamihira",
+
+ // Sir Mokshagundam Visvesvaraya - a notable Indian engineer and recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya
+ "visvesvaraya",
+
+ // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard
+ "volhard",
+
+ // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer
+ "wescoff",
+
+ // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles
+ "wiles",
+
+ // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams
+ "williams",
+
+ // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson
+ "wilson",
+
+ // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing
+ "wing",
+
+ // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak
+ "wozniak",
+
+ // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers
+ "wright",
+
+ // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow
+ "yalow",
+
+ // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath
+ "yonath",
+ }
+)
+
+// GetRandomName generates a random name from the list of adjectives and surnames in this package
+// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random
+// integer between 0 and 9 will be appended to the name, e.g. `focused_turing3`
+func GetRandomName(retry int) string {
+ rnd := random.Rand
+begin:
+ name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))])
+ if name == "boring_wozniak" /* Steve Wozniak is not boring */ {
+ goto begin
+ }
+
+ if retry > 0 {
+ name = fmt.Sprintf("%s%d", name, rnd.Intn(10))
+ }
+ return name
+}
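A minimal sketch of consuming this API with collision handling, in the spirit of the retry parameter; the inUse set is a hypothetical stand-in for a real registry of names already taken:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/namesgenerator"
)

func main() {
	inUse := map[string]bool{"focused_turing": true} // hypothetical taken names

	// First try without a suffix; on collision, pass retry > 0 so a
	// digit 0-9 is appended (e.g. "focused_turing3").
	name := namesgenerator.GetRandomName(0)
	for retries := 1; inUse[name]; retries++ {
		name = namesgenerator.GetRandomName(retries)
	}
	fmt.Println(name)
}
```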
diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go
new file mode 100644
index 00000000000..d1a94977d7f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator_test.go
@@ -0,0 +1,27 @@
+package namesgenerator
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestNameFormat(t *testing.T) {
+ name := GetRandomName(0)
+ if !strings.Contains(name, "_") {
+ t.Fatalf("Generated name does not contain an underscore")
+ }
+ if strings.ContainsAny(name, "0123456789") {
+ t.Fatalf("Generated name contains numbers!")
+ }
+}
+
+func TestNameRetries(t *testing.T) {
+ name := GetRandomName(1)
+ if !strings.Contains(name, "_") {
+ t.Fatalf("Generated name does not contain an underscore")
+ }
+ if !strings.ContainsAny(name, "0123456789") {
+ t.Fatalf("Generated name doesn't contain a number")
+ }
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
new file mode 100644
index 00000000000..7738fc7411e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
@@ -0,0 +1,74 @@
+// +build !windows
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "errors"
+ "fmt"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4)
+ Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
+ Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
+ Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
+}
+
+// CompareKernelVersion compares two kernel.VersionInfo structs.
+// Returns -1 if a < b, 0 if a == b, 1 if a > b
+func CompareKernelVersion(a, b VersionInfo) int {
+ if a.Kernel < b.Kernel {
+ return -1
+ } else if a.Kernel > b.Kernel {
+ return 1
+ }
+
+ if a.Major < b.Major {
+ return -1
+ } else if a.Major > b.Major {
+ return 1
+ }
+
+ if a.Minor < b.Minor {
+ return -1
+ } else if a.Minor > b.Minor {
+ return 1
+ }
+
+ return 0
+}
+
+// ParseRelease parses a string and creates a VersionInfo based on it.
+func ParseRelease(release string) (*VersionInfo, error) {
+ var (
+ kernel, major, minor, parsed int
+ flavor, partial string
+ )
+
+ // Ignore error from Sscanf to allow an empty flavor. Instead, just
+ // make sure we got all the version numbers.
+ parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial)
+ if parsed < 2 {
+ return nil, errors.New("Can't parse kernel version " + release)
+ }
+
+ // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64
+ parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor)
+ if parsed < 1 {
+ flavor = partial
+ }
+
+ return &VersionInfo{
+ Kernel: kernel,
+ Major: major,
+ Minor: minor,
+ Flavor: flavor,
+ }, nil
+}
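ParseRelease and CompareKernelVersion compose naturally, e.g. to gate a feature on a minimum kernel version. A short sketch under the import path of this vendor tree:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	// "3.12-1-amd64" has no minor component: minor parses as 0 and
	// "-1-amd64" is kept as the flavor.
	v, err := kernel.ParseRelease("3.12-1-amd64")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(v) // 3.12.0-1-amd64

	min := kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}
	if kernel.CompareKernelVersion(*v, min) < 0 {
		log.Fatalf("kernel %s is older than required %s", v, &min)
	}
}
```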
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
new file mode 100644
index 00000000000..71f205b2852
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
@@ -0,0 +1,56 @@
+// +build darwin
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "fmt"
+ "os/exec"
+ "strings"
+
+ "github.com/mattn/go-shellwords"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+ release, err := getRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ return ParseRelease(release)
+}
+
+// getRelease uses `system_profiler SPSoftwareDataType` to get the OS X kernel version
+func getRelease() (string, error) {
+ cmd := exec.Command("system_profiler", "SPSoftwareDataType")
+ osName, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ var release string
+ data := strings.Split(string(osName), "\n")
+ for _, line := range data {
+ if strings.Contains(line, "Kernel Version") {
+ // The line has a format like ' Kernel Version: Darwin 14.5.0'
+ content := strings.SplitN(line, ":", 2)
+ if len(content) != 2 {
+ return "", fmt.Errorf("Kernel Version is invalid")
+ }
+
+ prettyNames, err := shellwords.Parse(content[1])
+ if err != nil {
+ return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error())
+ }
+
+ if len(prettyNames) != 2 {
+ return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ")
+ }
+ release = prettyNames[1]
+ }
+ }
+
+ return release, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
new file mode 100644
index 00000000000..54a89d28c66
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
@@ -0,0 +1,30 @@
+// +build linux freebsd solaris
+
+// Package kernel provides helper functions to get, parse and compare kernel
+// versions for different platforms.
+package kernel
+
+import (
+ "bytes"
+)
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+ uts, err := uname()
+ if err != nil {
+ return nil, err
+ }
+
+ release := make([]byte, len(uts.Release))
+
+ i := 0
+ for _, c := range uts.Release {
+ release[i] = byte(c)
+ i++
+ }
+
+ // Remove the \x00 from the release for Atoi to parse correctly
+ release = release[:bytes.IndexByte(release, 0)]
+
+ return ParseRelease(string(release))
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go
new file mode 100644
index 00000000000..dc8c0e307ba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix_test.go
@@ -0,0 +1,95 @@
+// +build !windows
+
+package kernel
+
+import (
+ "fmt"
+ "testing"
+)
+
+func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) {
+ a, err := ParseRelease(release)
+ if err != nil {
+ t.Fatalf("ParseRelease(%q) returned an error: %v", release, err)
+ }
+
+ if r := CompareKernelVersion(*a, *b); r != result {
+ t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result)
+ }
+ if a.Flavor != b.Flavor {
+ t.Fatalf("Unexpected parsed kernel flavor. Found %s, expected %s", a.Flavor, b.Flavor)
+ }
+}
+
+// TestParseRelease tests the ParseRelease() function
+func TestParseRelease(t *testing.T) {
+ assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
+ assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+ assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
+ assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
+ assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
+ assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
+ assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
+ // Errors
+ invalids := []string{
+ "3",
+ "a",
+ "a.a",
+ "a.a.a-a",
+ }
+ for _, invalid := range invalids {
+ expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
+ if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
+ t.Fatalf("Expected ParseRelease to fail with %q for %q, got %v", expectedMessage, invalid, err)
+ }
+ }
+}
+
+func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
+ if r := CompareKernelVersion(a, b); r != result {
+ t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
+ }
+}
+
+// TestCompareKernelVersion tests the CompareKernelVersion() function
+func TestCompareKernelVersion(t *testing.T) {
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 0)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 2, Major: 6, Minor: 0},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ -1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ VersionInfo{Kernel: 2, Major: 6, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 0)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 5},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 0, Minor: 20},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ -1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 7, Minor: 20},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ -1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+ VersionInfo{Kernel: 3, Major: 7, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ 1)
+ assertKernelVersion(t,
+ VersionInfo{Kernel: 3, Major: 8, Minor: 0},
+ VersionInfo{Kernel: 3, Major: 8, Minor: 20},
+ -1)
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 00000000000..80fab8ff642
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,69 @@
+// +build windows
+
+package kernel
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+ kvi string // Full BuildLabEx string read from the registry
+ major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6)
+ minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1)
+ build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601)
+}
+
+func (k *VersionInfo) String() string {
+ return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+
+ var (
+ h syscall.Handle
+ dwVersion uint32
+ err error
+ )
+
+ KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+ if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+ syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion\`),
+ 0,
+ syscall.KEY_READ,
+ &h); err != nil {
+ return KVI, err
+ }
+ defer syscall.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err = syscall.RegQueryValueEx(h,
+ syscall.StringToUTF16Ptr("BuildLabEx"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return KVI, err
+ }
+
+ KVI.kvi = syscall.UTF16ToString(buf[:])
+
+ // Important - docker.exe MUST be manifested for this API to return
+ // the correct information.
+ if dwVersion, err = syscall.GetVersion(); err != nil {
+ return KVI, err
+ }
+
+ KVI.major = int(dwVersion & 0xFF)
+ KVI.minor = int((dwVersion & 0xFF00) >> 8)
+ KVI.build = int((dwVersion & 0xFFFF0000) >> 16)
+
+ return KVI, nil
+}
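The masks at the end unpack the DWORD that GetVersion returns: major version in the low byte, minor in the next byte, build number in the high word. A platform-neutral sketch of the same bit arithmetic (the sample value is illustrative, encoding 6.1 build 7601):

```go
package main

import "fmt"

func main() {
	// Packed as GetVersion returns it: low byte = major, second byte =
	// minor, high word = build. 0x1DB10106 encodes 6.1 build 7601.
	var dwVersion uint32 = 0x1DB10106

	major := int(dwVersion & 0xFF)
	minor := int((dwVersion & 0xFF00) >> 8)
	build := int((dwVersion & 0xFFFF0000) >> 16)

	fmt.Printf("%d.%d build %d\n", major, minor, build) // 6.1 build 7601
}
```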
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
new file mode 100644
index 00000000000..bb9b32641e8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
@@ -0,0 +1,19 @@
+package kernel
+
+import (
+ "syscall"
+)
+
+// Utsname represents the system name structure.
+// It is a passthrough for syscall.Utsname so the type stays portable to
+// platforms where syscall.Utsname is not available.
+type Utsname syscall.Utsname
+
+func uname() (*syscall.Utsname, error) {
+ uts := &syscall.Utsname{}
+
+ if err := syscall.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
new file mode 100644
index 00000000000..49370bd3dd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
@@ -0,0 +1,14 @@
+package kernel
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func uname() (*unix.Utsname, error) {
+ uts := &unix.Utsname{}
+
+ if err := unix.Uname(uts); err != nil {
+ return nil, err
+ }
+ return uts, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
new file mode 100644
index 00000000000..1da3f239fac
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!solaris
+
+package kernel
+
+import (
+ "errors"
+)
+
+// Utsname represents the system name structure.
+// It is defined here to keep the package portable on platforms where
+// syscall.Utsname is not available.
+type Utsname struct {
+ Release [65]byte
+}
+
+func uname() (*Utsname, error) {
+ return nil, errors.New("Kernel version detection is available only on linux and solaris")
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
new file mode 100644
index 00000000000..e04a3499af9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
@@ -0,0 +1,77 @@
+// Package operatingsystem provides helper functions to get the operating system
+// name for different platforms.
+package operatingsystem
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+
+ "github.com/mattn/go-shellwords"
+)
+
+var (
+ // file to use to detect if the daemon is running in a container
+ proc1Cgroup = "/proc/1/cgroup"
+
+ // file to check to determine Operating System
+ etcOsRelease = "/etc/os-release"
+
+ // used by stateless systems like Clear Linux
+ altOsRelease = "/usr/lib/os-release"
+)
+
+// GetOperatingSystem gets the name of the current operating system.
+func GetOperatingSystem() (string, error) {
+ osReleaseFile, err := os.Open(etcOsRelease)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err)
+ }
+ osReleaseFile, err = os.Open(altOsRelease)
+ if err != nil {
+ return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err)
+ }
+ }
+ defer osReleaseFile.Close()
+
+ var prettyName string
+ scanner := bufio.NewScanner(osReleaseFile)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "PRETTY_NAME=") {
+ data := strings.SplitN(line, "=", 2)
+ prettyNames, err := shellwords.Parse(data[1])
+ if err != nil {
+ return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error())
+ }
+ if len(prettyNames) != 1 {
+ return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1])
+ }
+ prettyName = prettyNames[0]
+ }
+ }
+ if prettyName != "" {
+ return prettyName, nil
+ }
+ // If not set, defaults to PRETTY_NAME="Linux"
+ // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html
+ return "Linux", nil
+}
+
+// IsContainerized returns true if we are running inside a container.
+func IsContainerized() (bool, error) {
+ b, err := ioutil.ReadFile(proc1Cgroup)
+ if err != nil {
+ return false, err
+ }
+ for _, line := range bytes.Split(b, []byte{'\n'}) {
+ if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) {
+ return true, nil
+ }
+ }
+ return false, nil
+}
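A minimal sketch of using both helpers together (Linux only; import path assumed from this vendor tree). The printed name comes from PRETTY_NAME in /etc/os-release, falling back to "Linux" as described above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/parsers/operatingsystem"
)

func main() {
	name, err := operatingsystem.GetOperatingSystem()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("OS:", name) // e.g. "Ubuntu 14.04 LTS"

	// Reads /proc/1/cgroup: any cgroup path for PID 1 beyond "/" or
	// "init.scope" means we are running inside a container.
	inContainer, err := operatingsystem.IsContainerized()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("containerized:", inContainer)
}
```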
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go
new file mode 100644
index 00000000000..d08ad148604
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_solaris.go
@@ -0,0 +1,37 @@
+// +build solaris,cgo
+
+package operatingsystem
+
+/*
+#include <zone.h>
+*/
+import "C"
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+)
+
+var etcOsRelease = "/etc/release"
+
+// GetOperatingSystem gets the name of the current operating system.
+func GetOperatingSystem() (string, error) {
+ b, err := ioutil.ReadFile(etcOsRelease)
+ if err != nil {
+ return "", err
+ }
+ if i := bytes.Index(b, []byte("\n")); i >= 0 {
+ b = bytes.Trim(b[:i], " ")
+ return string(b), nil
+ }
+ return "", errors.New("release not found")
+}
+
+// IsContainerized returns true if we are running inside a container.
+func IsContainerized() (bool, error) {
+ if C.getzoneid() != 0 {
+ return true, nil
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go
new file mode 100644
index 00000000000..bc91c3c5337
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go
@@ -0,0 +1,25 @@
+// +build freebsd darwin
+
+package operatingsystem
+
+import (
+ "errors"
+ "os/exec"
+)
+
+// GetOperatingSystem gets the name of the current operating system.
+func GetOperatingSystem() (string, error) {
+ cmd := exec.Command("uname", "-s")
+ osName, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+ return string(osName), nil
+}
+
+// IsContainerized returns true if we are running inside a container.
+// Not implemented on FreeBSD and Darwin: always returns false, with an error.
+func IsContainerized() (bool, error) {
+ // TODO: Implement jail detection for freeBSD
+ return false, errors.New("Cannot detect if we are in container")
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go
new file mode 100644
index 00000000000..e7120c65c47
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix_test.go
@@ -0,0 +1,247 @@
+// +build linux freebsd
+
+package operatingsystem
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestGetOperatingSystem(t *testing.T) {
+ var backup = etcOsRelease
+
+ invalids := []struct {
+ content string
+ errorExpected string
+ }{
+ {
+ `PRETTY_NAME=Source Mage GNU/Linux
+PRETTY_NAME=Ubuntu 14.04.LTS`,
+ "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux",
+ },
+ {
+ `PRETTY_NAME="Ubuntu Linux
+PRETTY_NAME=Ubuntu 14.04.LTS`,
+ "PRETTY_NAME is invalid: invalid command line string",
+ },
+ {
+ `PRETTY_NAME=Ubuntu'
+PRETTY_NAME=Ubuntu 14.04.LTS`,
+ "PRETTY_NAME is invalid: invalid command line string",
+ },
+ {
+ `PRETTY_NAME'
+PRETTY_NAME=Ubuntu 14.04.LTS`,
+ "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS",
+ },
+ }
+
+ valids := []struct {
+ content string
+ expected string
+ }{
+ {
+ `NAME="Ubuntu"
+PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+VERSION_ID="14.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`,
+ "Linux",
+ },
+ {
+ `NAME="Ubuntu"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+VERSION_ID="14.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`,
+ "Linux",
+ },
+ {
+ `NAME=Gentoo
+ID=gentoo
+PRETTY_NAME="Gentoo/Linux"
+ANSI_COLOR="1;32"
+HOME_URL="http://www.gentoo.org/"
+SUPPORT_URL="http://www.gentoo.org/main/en/support.xml"
+BUG_REPORT_URL="https://bugs.gentoo.org/"
+`,
+ "Gentoo/Linux",
+ },
+ {
+ `NAME="Ubuntu"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+PRETTY_NAME="Ubuntu 14.04 LTS"
+VERSION_ID="14.04"
+HOME_URL="http://www.ubuntu.com/"
+SUPPORT_URL="http://help.ubuntu.com/"
+BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`,
+ "Ubuntu 14.04 LTS",
+ },
+ {
+ `NAME="Ubuntu"
+VERSION="14.04, Trusty Tahr"
+ID=ubuntu
+ID_LIKE=debian
+PRETTY_NAME='Ubuntu 14.04 LTS'`,
+ "Ubuntu 14.04 LTS",
+ },
+ {
+ `PRETTY_NAME=Source
+NAME="Source Mage"`,
+ "Source",
+ },
+ {
+ `PRETTY_NAME=Source
+PRETTY_NAME="Source Mage"`,
+ "Source Mage",
+ },
+ }
+
+ dir := os.TempDir()
+ etcOsRelease = filepath.Join(dir, "etcOsRelease")
+
+ defer func() {
+ os.Remove(etcOsRelease)
+ etcOsRelease = backup
+ }()
+
+ for _, elt := range invalids {
+ if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
+ }
+ s, err := GetOperatingSystem()
+ if err == nil || err.Error() != elt.errorExpected {
+ t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err)
+ }
+ }
+
+ for _, elt := range valids {
+ if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", etcOsRelease, err)
+ }
+ s, err := GetOperatingSystem()
+ if err != nil || s != elt.expected {
+ t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err)
+ }
+ }
+}
+
+func TestIsContainerized(t *testing.T) {
+ var (
+ backup = proc1Cgroup
+ nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope
+8:net_cls,net_prio:/
+7:cpuset:/
+6:freezer:/
+5:devices:/init.scope
+4:blkio:/init.scope
+3:cpu,cpuacct:/init.scope
+2:perf_event:/
+1:name=systemd:/init.scope
+`)
+ nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/
+13:hugetlb:/
+12:net_prio:/
+11:perf_event:/
+10:bfqio:/
+9:blkio:/
+8:net_cls:/
+7:freezer:/
+6:devices:/
+5:memory:/
+4:cpuacct:/
+3:cpu:/
+2:cpuset:/
+`)
+ containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+7:net_cls:/
+6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d
+1:cpuset:/`)
+ )
+
+ dir := os.TempDir()
+ proc1Cgroup = filepath.Join(dir, "proc1Cgroup")
+
+ defer func() {
+ os.Remove(proc1Cgroup)
+ proc1Cgroup = backup
+ }()
+
+ if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
+ }
+ inContainer, err := IsContainerized()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if inContainer {
+ t.Fatal("Wrongly assuming containerized")
+ }
+
+ if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
+ }
+ inContainer, err = IsContainerized()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if inContainer {
+ t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout")
+ }
+
+ if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", proc1Cgroup, err)
+ }
+ inContainer, err = IsContainerized()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !inContainer {
+ t.Fatal("Wrongly assuming non-containerized")
+ }
+}
+
+func TestOsReleaseFallback(t *testing.T) {
+ var backup = etcOsRelease
+ var altBackup = altOsRelease
+ dir := os.TempDir()
+ etcOsRelease = filepath.Join(dir, "etcOsRelease")
+ altOsRelease = filepath.Join(dir, "altOsRelease")
+
+ defer func() {
+ os.Remove(altOsRelease)
+ etcOsRelease = backup
+ altOsRelease = altBackup
+ }()
+ content := `NAME=Gentoo
+ID=gentoo
+PRETTY_NAME="Gentoo/Linux"
+ANSI_COLOR="1;32"
+HOME_URL="http://www.gentoo.org/"
+SUPPORT_URL="http://www.gentoo.org/main/en/support.xml"
+BUG_REPORT_URL="https://bugs.gentoo.org/"
+`
+ if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil {
+ t.Fatalf("failed to write to %s: %v", altOsRelease, err)
+ }
+ s, err := GetOperatingSystem()
+ if err != nil || s != "Gentoo/Linux" {
+ t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
new file mode 100644
index 00000000000..3c86b6af9c2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
@@ -0,0 +1,49 @@
+package operatingsystem
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
+// for a similar sample
+
+// GetOperatingSystem gets the name of the current operating system.
+func GetOperatingSystem() (string, error) {
+
+ var h syscall.Handle
+
+ // Default return value
+ ret := "Unknown Operating System"
+
+ if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+ syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Windows NT\CurrentVersion\`),
+ 0,
+ syscall.KEY_READ,
+ &h); err != nil {
+ return ret, err
+ }
+ defer syscall.RegCloseKey(h)
+
+ var buf [1 << 10]uint16
+ var typ uint32
+ n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
+
+ if err := syscall.RegQueryValueEx(h,
+ syscall.StringToUTF16Ptr("ProductName"),
+ nil,
+ &typ,
+ (*byte)(unsafe.Pointer(&buf[0])),
+ &n); err != nil {
+ return ret, err
+ }
+ ret = syscall.UTF16ToString(buf[:])
+
+ return ret, nil
+}
+
+// IsContainerized returns true if we are running inside a container.
+// No-op on Windows, always returns false.
+func IsContainerized() (bool, error) {
+ return false, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go
new file mode 100644
index 00000000000..acc897168f3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go
@@ -0,0 +1,69 @@
+// Package parsers provides helper functions to parse and validate different types
+// of strings, such as hosts, unix addresses, tcp addresses, filters, and kernel
+// operating system versions.
+package parsers
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
+func ParseKeyValueOpt(opt string) (string, string, error) {
+ parts := strings.SplitN(opt, "=", 2)
+ if len(parts) != 2 {
+ return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
+ }
+ return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
+}
+
+// ParseUintList parses and validates the specified string as the value
+// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
+// one of the formats below. Note that duplicates are actually allowed in the
+// input string. It returns a `map[int]bool` with available elements from `val`
+// set to `true`.
+// Supported formats:
+// 7
+// 1-6
+// 0,3-4,7,8-10
+// 0-0,0,1-7
+// 03,1-3 <- leading zeroes are stripped, so this is parsed as [1,2,3]
+// 3,2,1
+// 0-2,3,1
+func ParseUintList(val string) (map[int]bool, error) {
+ if val == "" {
+ return map[int]bool{}, nil
+ }
+
+ availableInts := make(map[int]bool)
+ split := strings.Split(val, ",")
+ errInvalidFormat := fmt.Errorf("invalid format: %s", val)
+
+ for _, r := range split {
+ if !strings.Contains(r, "-") {
+ v, err := strconv.Atoi(r)
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ availableInts[v] = true
+ } else {
+ split := strings.SplitN(r, "-", 2)
+ min, err := strconv.Atoi(split[0])
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ max, err := strconv.Atoi(split[1])
+ if err != nil {
+ return nil, errInvalidFormat
+ }
+ if max < min {
+ return nil, errInvalidFormat
+ }
+ for i := min; i <= max; i++ {
+ availableInts[i] = true
+ }
+ }
+ }
+ return availableInts, nil
+}
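A short sketch of both parsers in use; the option string and cpuset value below are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	// ParseKeyValueOpt splits on the first '=' only and trims whitespace.
	key, value, err := parsers.ParseKeyValueOpt("native.cgroupdriver=systemd")
	if err != nil {
		panic(err)
	}
	fmt.Println(key, value) // native.cgroupdriver systemd

	// ParseUintList expands ranges and collapses duplicates into the map.
	set, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set[4], set[5]) // true false
}
```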
diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go
new file mode 100644
index 00000000000..7f19e902799
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/parsers_test.go
@@ -0,0 +1,70 @@
+package parsers
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestParseKeyValueOpt(t *testing.T) {
+ invalids := map[string]string{
+ "": "Unable to parse key/value option: ",
+ "key": "Unable to parse key/value option: key",
+ }
+ for invalid, expectedError := range invalids {
+ if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError {
+ t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err)
+ }
+ }
+ valids := map[string][]string{
+ "key=value": {"key", "value"},
+ " key = value ": {"key", "value"},
+ "key=value1=value2": {"key", "value1=value2"},
+ " key = value1 = value2 ": {"key", "value1 = value2"},
+ }
+ for valid, expectedKeyValue := range valids {
+ key, value, err := ParseKeyValueOpt(valid)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if key != expectedKeyValue[0] || value != expectedKeyValue[1] {
+ t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value)
+ }
+ }
+}
+
+func TestParseUintList(t *testing.T) {
+ valids := map[string]map[int]bool{
+ "": {},
+ "7": {7: true},
+ "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true},
+ "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true},
+ "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true},
+ "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true},
+ "03,1-3": {1: true, 2: true, 3: true},
+ "3,2,1": {1: true, 2: true, 3: true},
+ "0-2,3,1": {0: true, 1: true, 2: true, 3: true},
+ }
+ for k, v := range valids {
+ out, err := ParseUintList(k)
+ if err != nil {
+ t.Fatalf("Expected not to fail, got %v", err)
+ }
+ if !reflect.DeepEqual(out, v) {
+ t.Fatalf("Expected %v, got %v", v, out)
+ }
+ }
+
+ invalids := []string{
+ "this",
+ "1--",
+ "1-10,,10",
+ "10-1",
+ "-1",
+ "-1,0",
+ }
+ for _, v := range invalids {
+ if out, err := ParseUintList(v); err == nil {
+ t.Fatalf("Expected failure with %s but got %v", v, out)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go
new file mode 100644
index 00000000000..e1ac6bee355
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go
@@ -0,0 +1,49 @@
+// Package pidfile provides a structure and helper functions to create and remove
+// a PID file. A PID file is usually used to store the process ID of a
+// running process.
+package pidfile
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// PIDFile is a file used to store the process ID of a running process.
+type PIDFile struct {
+ path string
+}
+
+func checkPIDFileAlreadyExists(path string) error {
+ if pidByte, err := ioutil.ReadFile(path); err == nil {
+ pidString := strings.TrimSpace(string(pidByte))
+ if pid, err := strconv.Atoi(pidString); err == nil {
+ if processExists(pid) {
+ return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path)
+ }
+ }
+ }
+ return nil
+}
+
+// New creates a PIDfile using the specified path.
+func New(path string) (*PIDFile, error) {
+ if err := checkPIDFileAlreadyExists(path); err != nil {
+ return nil, err
+ }
+ if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil {
+ return nil, err
+ }
+
+ return &PIDFile{path: path}, nil
+}
+
+// Remove removes the PIDFile.
+func (file PIDFile) Remove() error {
+ if err := os.Remove(file.path); err != nil {
+ return err
+ }
+ return nil
+}
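A sketch of the intended lifecycle, assuming a hypothetical daemon and PID-file path:

```go
package main

import (
	"log"

	"github.com/docker/docker/pkg/pidfile"
)

func main() {
	// New fails if the file already names a live process, which is the
	// guard against starting a second daemon instance.
	pf, err := pidfile.New("/tmp/example-daemon.pid") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer pf.Remove()

	// ... long-running daemon work would go here ...
}
```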
diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go
new file mode 100644
index 00000000000..73e8af76db7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_test.go
@@ -0,0 +1,38 @@
+package pidfile
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func TestNewAndRemove(t *testing.T) {
+ dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile")
+ if err != nil {
+ t.Fatal("Could not create test directory")
+ }
+
+ path := filepath.Join(dir, "testfile")
+ file, err := New(path)
+ if err != nil {
+ t.Fatal("Could not create test file", err)
+ }
+
+ _, err = New(path)
+ if err == nil {
+ t.Fatal("Test file creation not blocked")
+ }
+
+ if err := file.Remove(); err != nil {
+ t.Fatal("Could not delete created test file")
+ }
+}
+
+func TestRemoveInvalidPath(t *testing.T) {
+ file := PIDFile{path: filepath.Join("foo", "bar")}
+
+ if err := file.Remove(); err == nil {
+ t.Fatal("Non-existing file doesn't give an error on delete")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go
new file mode 100644
index 00000000000..28f3deca919
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go
@@ -0,0 +1,16 @@
+// +build !windows
+
+package pidfile
+
+import (
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+func processExists(pid int) bool {
+ if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil {
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go
new file mode 100644
index 00000000000..ae489c627a9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go
@@ -0,0 +1,23 @@
+package pidfile
+
+import "syscall"
+
+const (
+ processQueryLimitedInformation = 0x1000
+
+ stillActive = 259
+)
+
+func processExists(pid int) bool {
+ h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))
+ if err != nil {
+ return false
+ }
+ var c uint32
+ err = syscall.GetExitCodeProcess(h, &c)
+ syscall.Close(h)
+ if err != nil {
+ return false
+ }
+ return c == stillActive
+}
diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go
new file mode 100644
index 00000000000..2cdc2c59184
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go
@@ -0,0 +1,16 @@
+// Package platform provides helper function to get the runtime architecture
+// for different platforms.
+package platform
+
+import (
+ "syscall"
+)
+
+// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …)
+func runtimeArchitecture() (string, error) {
+ utsname := &syscall.Utsname{}
+ if err := syscall.Uname(utsname); err != nil {
+ return "", err
+ }
+ return charsToString(utsname.Machine), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go
new file mode 100644
index 00000000000..45bbcf1535a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go
@@ -0,0 +1,20 @@
+// +build freebsd solaris darwin
+
+// Package platform provides helper function to get the runtime architecture
+// for different platforms.
+package platform
+
+import (
+ "os/exec"
+ "strings"
+)
+
+// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...)
+func runtimeArchitecture() (string, error) {
+ cmd := exec.Command("/usr/bin/uname", "-m")
+ machine, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(string(machine)), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go
new file mode 100644
index 00000000000..0dd8a2e416a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go
@@ -0,0 +1,52 @@
+package platform
+
+import (
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetSystemInfo = modkernel32.NewProc("GetSystemInfo")
+)
+
+// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx
+type systeminfo struct {
+ wProcessorArchitecture uint16
+ wReserved uint16
+ dwPageSize uint32
+ lpMinimumApplicationAddress uintptr
+ lpMaximumApplicationAddress uintptr
+ dwActiveProcessorMask uintptr
+ dwNumberOfProcessors uint32
+ dwProcessorType uint32
+ dwAllocationGranularity uint32
+ wProcessorLevel uint16
+ wProcessorRevision uint16
+}
+
+// Constants
+const (
+ ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64
+ ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64
+ ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL
+ ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM
+)
+
+var sysinfo systeminfo
+
+// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …)
+func runtimeArchitecture() (string, error) {
+ syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0)
+ switch sysinfo.wProcessorArchitecture {
+ case ProcessorArchitecture64, ProcessorArchitectureIA64:
+ return "x86_64", nil
+ case ProcessorArchitecture32:
+ return "i686", nil
+ case ProcessorArchitectureArm:
+ return "arm", nil
+ default:
+ return "", fmt.Errorf("Unknown processor architecture")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go
new file mode 100644
index 00000000000..e4b03122f47
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/platform.go
@@ -0,0 +1,23 @@
+package platform
+
+import (
+ "runtime"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ // Architecture holds the runtime architecture of the process.
+ Architecture string
+ // OSType holds the runtime operating system type (Linux, …) of the process.
+ OSType string
+)
+
+func init() {
+ var err error
+ Architecture, err = runtimeArchitecture()
+ if err != nil {
+ logrus.Errorf("Could not read system architecture info: %v", err)
+ }
+ OSType = runtime.GOOS
+}
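Since both values are resolved in `init`, consumers simply read the package variables; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/platform"
)

func main() {
	// Architecture comes from uname(2) (or the per-OS fallbacks above);
	// OSType is taken directly from runtime.GOOS.
	fmt.Printf("arch=%s os=%s\n", platform.Architecture, platform.OSType)
}
```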
diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go
new file mode 100644
index 00000000000..5dcbadfdfe4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/utsname_int8.go
@@ -0,0 +1,18 @@
+// +build linux,386 linux,amd64 linux,arm64
+// see golang's sources src/syscall/ztypes_linux_*.go that use int8
+
+package platform
+
+// Convert the OS/ARCH-specific utsname.Machine to string
+// given as an array of signed int8
+func charsToString(ca [65]int8) string {
+ s := make([]byte, len(ca))
+ var lens int
+ for ; lens < len(ca); lens++ {
+ if ca[lens] == 0 {
+ break
+ }
+ s[lens] = uint8(ca[lens])
+ }
+ return string(s[0:lens])
+}
diff --git a/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go
new file mode 100644
index 00000000000..c9875cf6e6f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/platform/utsname_uint8.go
@@ -0,0 +1,18 @@
+// +build linux,arm linux,ppc64 linux,ppc64le s390x
+// see golang's sources src/syscall/ztypes_linux_*.go that use uint8
+
+package platform
+
+// Convert the OS/ARCH-specific utsname.Machine to string
+// given as an array of unsigned uint8
+func charsToString(ca [65]uint8) string {
+ s := make([]byte, len(ca))
+ var lens int
+ for ; lens < len(ca); lens++ {
+ if ca[lens] == 0 {
+ break
+ }
+ s[lens] = ca[lens]
+ }
+ return string(s[0:lens])
+}
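The two `charsToString` variants exist only because `syscall.Utsname.Machine` is `[65]int8` on some GOARCHes and `[65]uint8` on others; a standalone sketch of the shared NUL-trimming logic (using `byte`, an alias of `uint8`):

```go
package main

import "fmt"

// trimNul mirrors charsToString: copy bytes up to the first NUL terminator.
func trimNul(ca [65]byte) string {
	n := 0
	for ; n < len(ca); n++ {
		if ca[n] == 0 {
			break
		}
	}
	return string(ca[:n])
}

func main() {
	var machine [65]byte
	copy(machine[:], "x86_64")
	fmt.Println(trimNul(machine)) // x86_64
}
```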
diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go
new file mode 100644
index 00000000000..a778677f7cb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/client.go
@@ -0,0 +1,188 @@
+package plugins
+
+import (
+ "bytes"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/plugins/transport"
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+const (
+ defaultTimeOut = 30
+)
+
+// NewClient creates a new plugin client (http).
+func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
+ tr := &http.Transport{}
+
+ if tlsConfig != nil {
+ c, err := tlsconfig.Client(*tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ tr.TLSClientConfig = c
+ }
+
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, err
+ }
+ socket := u.Host
+ if socket == "" {
+ // valid local socket addresses have the host empty.
+ socket = u.Path
+ }
+ if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil {
+ return nil, err
+ }
+ scheme := httpScheme(u)
+
+ clientTransport := transport.NewHTTPTransport(tr, scheme, socket)
+ return NewClientWithTransport(clientTransport), nil
+}
+
+// NewClientWithTransport creates a new plugin client with a given transport.
+func NewClientWithTransport(tr transport.Transport) *Client {
+ return &Client{
+ http: &http.Client{
+ Transport: tr,
+ },
+ requestFactory: tr,
+ }
+}
+
+// Client represents a plugin client.
+type Client struct {
+ http *http.Client // http client to use
+ requestFactory transport.RequestFactory
+}
+
+// Call calls the specified method with the specified arguments for the plugin.
+// It will retry for 30 seconds if a failure occurs when calling.
+func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {
+ var buf bytes.Buffer
+ if args != nil {
+ if err := json.NewEncoder(&buf).Encode(args); err != nil {
+ return err
+ }
+ }
+ body, err := c.callWithRetry(serviceMethod, &buf, true)
+ if err != nil {
+ return err
+ }
+ defer body.Close()
+ if ret != nil {
+ if err := json.NewDecoder(body).Decode(&ret); err != nil {
+ logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+ return err
+ }
+ }
+ return nil
+}
+
+// Stream calls the specified method with the specified arguments for the plugin and returns the response body
+func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) {
+ var buf bytes.Buffer
+ if err := json.NewEncoder(&buf).Encode(args); err != nil {
+ return nil, err
+ }
+ return c.callWithRetry(serviceMethod, &buf, true)
+}
+
+// SendFile calls the specified method, and passes through the IO stream
+func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {
+ body, err := c.callWithRetry(serviceMethod, data, true)
+ if err != nil {
+ return err
+ }
+ defer body.Close()
+ if err := json.NewDecoder(body).Decode(&ret); err != nil {
+ logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err)
+ return err
+ }
+ return nil
+}
+
+func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {
+ req, err := c.requestFactory.NewRequest(serviceMethod, data)
+ if err != nil {
+ return nil, err
+ }
+
+ var retries int
+ start := time.Now()
+
+ for {
+ resp, err := c.http.Do(req)
+ if err != nil {
+ if !retry {
+ return nil, err
+ }
+
+ timeOff := backoff(retries)
+ if abort(start, timeOff) {
+ return nil, err
+ }
+ retries++
+ logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff)
+ time.Sleep(timeOff)
+ continue
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ b, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}
+ }
+
+ // Plugins' Response(s) should have an Err field indicating what went
+ // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just
+ // return the string(body)
+ type responseErr struct {
+ Err string
+ }
+ remoteErr := responseErr{}
+ if err := json.Unmarshal(b, &remoteErr); err == nil {
+ if remoteErr.Err != "" {
+ return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}
+ }
+ }
+ // old way...
+ return nil, &statusError{resp.StatusCode, serviceMethod, string(b)}
+ }
+ return resp.Body, nil
+ }
+}
+
+func backoff(retries int) time.Duration {
+ b, max := 1, defaultTimeOut
+ for b < max && retries > 0 {
+ b *= 2
+ retries--
+ }
+ if b > max {
+ b = max
+ }
+ return time.Duration(b) * time.Second
+}
+
+func abort(start time.Time, timeOff time.Duration) bool {
+ return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second
+}
+
+func httpScheme(u *url.URL) string {
+ scheme := u.Scheme
+ if scheme != "https" {
+ scheme = "http"
+ }
+ return scheme
+}
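A sketch of driving the client against a hypothetical plugin socket; the request and response shapes below are made up for illustration, not part of any real plugin protocol:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

type createRequest struct{ Name string }
type createResponse struct{ Err string }

func main() {
	c, err := plugins.NewClient("unix:///run/docker/plugins/demo.sock", nil)
	if err != nil {
		panic(err)
	}
	var resp createResponse
	// Call JSON-encodes the request, POSTs it, and on connection failure
	// retries with exponential backoff (1s, 2s, 4s, ...) for up to ~30s.
	if err := c.Call("VolumeDriver.Create", createRequest{Name: "vol1"}, &resp); err != nil {
		fmt.Println("plugin call failed:", err)
	}
}
```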
diff --git a/vendor/github.com/docker/docker/pkg/plugins/client_test.go b/vendor/github.com/docker/docker/pkg/plugins/client_test.go
new file mode 100644
index 00000000000..9faad86a154
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/client_test.go
@@ -0,0 +1,134 @@
+package plugins
+
+import (
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/docker/docker/pkg/plugins/transport"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+var (
+ mux *http.ServeMux
+ server *httptest.Server
+)
+
+func setupRemotePluginServer() string {
+ mux = http.NewServeMux()
+ server = httptest.NewServer(mux)
+ return server.URL
+}
+
+func teardownRemotePluginServer() {
+ if server != nil {
+ server.Close()
+ }
+}
+
+func TestFailedConnection(t *testing.T) {
+ c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true})
+ _, err := c.callWithRetry("Service.Method", nil, false)
+ if err == nil {
+ t.Fatal("Unexpected successful connection")
+ }
+}
+
+func TestEchoInputOutput(t *testing.T) {
+ addr := setupRemotePluginServer()
+ defer teardownRemotePluginServer()
+
+ m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}}
+
+ mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != "POST" {
+ t.Fatalf("Expected POST, got %s\n", r.Method)
+ }
+
+ header := w.Header()
+ header.Set("Content-Type", transport.VersionMimetype)
+
+ io.Copy(w, r.Body)
+ })
+
+ c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true})
+ var output Manifest
+ err := c.Call("Test.Echo", m, &output)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(output, m) {
+ t.Fatalf("Expected %v, was %v\n", m, output)
+ }
+ err = c.Call("Test.Echo", nil, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestBackoff(t *testing.T) {
+ cases := []struct {
+ retries int
+ expTimeOff time.Duration
+ }{
+ {0, time.Duration(1)},
+ {1, time.Duration(2)},
+ {2, time.Duration(4)},
+ {4, time.Duration(16)},
+ {6, time.Duration(30)},
+ {10, time.Duration(30)},
+ }
+
+ for _, c := range cases {
+ s := c.expTimeOff * time.Second
+ if d := backoff(c.retries); d != s {
+ t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d)
+ }
+ }
+}
+
+func TestAbortRetry(t *testing.T) {
+ cases := []struct {
+ timeOff time.Duration
+ expAbort bool
+ }{
+ {time.Duration(1), false},
+ {time.Duration(2), false},
+ {time.Duration(10), false},
+ {time.Duration(30), true},
+ {time.Duration(40), true},
+ }
+
+ for _, c := range cases {
+ s := c.timeOff * time.Second
+ if a := abort(time.Now(), s); a != c.expAbort {
+ t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a)
+ }
+ }
+}
+
+func TestClientScheme(t *testing.T) {
+ cases := map[string]string{
+ "tcp://127.0.0.1:8080": "http",
+ "unix:///usr/local/plugins/foo": "http",
+ "http://127.0.0.1:8080": "http",
+ "https://127.0.0.1:8080": "https",
+ }
+
+ for addr, scheme := range cases {
+ u, err := url.Parse(addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ s := httpScheme(u)
+
+ if s != scheme {
+ t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go
new file mode 100644
index 00000000000..2077f2abc58
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go
@@ -0,0 +1,132 @@
+package plugins
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+)
+
+var (
+ // ErrNotFound plugin not found
+ ErrNotFound = errors.New("plugin not found")
+ socketsPath = "/run/docker/plugins"
+ specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"}
+)
+
+// localRegistry defines a registry that is local (using unix socket).
+type localRegistry struct{}
+
+func newLocalRegistry() localRegistry {
+ return localRegistry{}
+}
+
+// Scan scans all the plugin paths and returns all the names it found
+func Scan() ([]string, error) {
+ var names []string
+ if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return nil
+ }
+
+ if fi.Mode()&os.ModeSocket != 0 {
+ name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+ names = append(names, name)
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+
+ for _, path := range specsPaths {
+ if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
+ if err != nil || fi.IsDir() {
+ return nil
+ }
+ name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name()))
+ names = append(names, name)
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ }
+ return names, nil
+}
+
+// Plugin returns the plugin registered with the given name (or returns an error).
+func (l *localRegistry) Plugin(name string) (*Plugin, error) {
+ socketpaths := pluginPaths(socketsPath, name, ".sock")
+
+ for _, p := range socketpaths {
+ if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 {
+ return NewLocalPlugin(name, "unix://"+p), nil
+ }
+ }
+
+ var txtspecpaths []string
+ for _, p := range specsPaths {
+ txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...)
+ txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...)
+ }
+
+ for _, p := range txtspecpaths {
+ if _, err := os.Stat(p); err == nil {
+ if strings.HasSuffix(p, ".json") {
+ return readPluginJSONInfo(name, p)
+ }
+ return readPluginInfo(name, p)
+ }
+ }
+ return nil, ErrNotFound
+}
+
+func readPluginInfo(name, path string) (*Plugin, error) {
+ content, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ addr := strings.TrimSpace(string(content))
+
+ u, err := url.Parse(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(u.Scheme) == 0 {
+ return nil, fmt.Errorf("Unknown protocol")
+ }
+
+ return NewLocalPlugin(name, addr), nil
+}
+
+func readPluginJSONInfo(name, path string) (*Plugin, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var p Plugin
+ if err := json.NewDecoder(f).Decode(&p); err != nil {
+ return nil, err
+ }
+ p.name = name
+ if len(p.TLSConfig.CAFile) == 0 {
+ p.TLSConfig.InsecureSkipVerify = true
+ }
+ p.activateWait = sync.NewCond(&sync.Mutex{})
+
+ return &p, nil
+}
+
+func pluginPaths(base, name, ext string) []string {
+ return []string{
+ filepath.Join(base, name+ext),
+ filepath.Join(base, name, name+ext),
+ }
+}
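Discovery in a sketch: `Scan` walks the socket and spec directories and reports the plugin names it finds, extensions stripped:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	// Looks for sockets under /run/docker/plugins and for .spec/.json
	// files under /etc/docker/plugins and /usr/lib/docker/plugins.
	names, err := plugins.Scan()
	if err != nil {
		panic(err)
	}
	for _, name := range names {
		fmt.Println("discovered plugin:", name)
	}
}
```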
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go
new file mode 100644
index 00000000000..f74090ee218
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_test.go
@@ -0,0 +1,119 @@
+package plugins
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func Setup(t *testing.T) (string, func()) {
+ tmpdir, err := ioutil.TempDir("", "docker-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ backupSockets, backupSpecs := socketsPath, specsPaths
+ socketsPath = tmpdir
+ specsPaths = []string{tmpdir}
+
+ return tmpdir, func() {
+ socketsPath, specsPaths = backupSockets, backupSpecs
+ os.RemoveAll(tmpdir)
+ }
+}
+
+func TestFileSpecPlugin(t *testing.T) {
+ tmpdir, unregister := Setup(t)
+ defer unregister()
+
+ cases := []struct {
+ path string
+ name string
+ addr string
+ fail bool
+ }{
+ // TODO Windows: Factor out the unix:// variants.
+ {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
+ {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false},
+ {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false},
+ {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false},
+ {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport
+ }
+
+ for _, c := range cases {
+ if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil {
+ t.Fatal(err)
+ }
+ if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ r := newLocalRegistry()
+ p, err := r.Plugin(c.name)
+ if c.fail && err == nil {
+ continue
+ }
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if p.name != c.name {
+ t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.name)
+ }
+
+ if p.Addr != c.addr {
+ t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr)
+ }
+
+ if p.TLSConfig.InsecureSkipVerify != true {
+ t.Fatalf("Expected TLS verification to be skipped")
+ }
+ }
+}
+
+func TestFileJSONSpecPlugin(t *testing.T) {
+ tmpdir, unregister := Setup(t)
+ defer unregister()
+
+ p := filepath.Join(tmpdir, "example.json")
+ spec := `{
+ "Name": "plugin-example",
+ "Addr": "https://example.com/docker/plugin",
+ "TLSConfig": {
+ "CAFile": "/usr/shared/docker/certs/example-ca.pem",
+ "CertFile": "/usr/shared/docker/certs/example-cert.pem",
+ "KeyFile": "/usr/shared/docker/certs/example-key.pem"
+ }
+}`
+
+ if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ r := newLocalRegistry()
+ plugin, err := r.Plugin("example")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if plugin.name != "example" {
+ t.Fatalf("Expected plugin `example`, got %s\n", plugin.name)
+ }
+
+ if plugin.Addr != "https://example.com/docker/plugin" {
+ t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr)
+ }
+
+ if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" {
+ t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile)
+ }
+
+ if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" {
+ t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile)
+ }
+
+ if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" {
+ t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go
new file mode 100644
index 00000000000..53e02d28589
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix_test.go
@@ -0,0 +1,61 @@
+// +build !windows
+
+package plugins
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+func TestLocalSocket(t *testing.T) {
+ // TODO Windows: Enable a similar version for Windows named pipes
+ tmpdir, unregister := Setup(t)
+ defer unregister()
+
+ cases := []string{
+ filepath.Join(tmpdir, "echo.sock"),
+ filepath.Join(tmpdir, "echo", "echo.sock"),
+ }
+
+ for _, c := range cases {
+ if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil {
+ t.Fatal(err)
+ }
+
+ l, err := net.Listen("unix", c)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ r := newLocalRegistry()
+ p, err := r.Plugin("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pp, err := r.Plugin("echo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(p, pp) {
+ t.Fatalf("Expected %v, was %v\n", p, pp)
+ }
+
+ if p.name != "echo" {
+ t.Fatalf("Expected plugin `echo`, got %s\n", p.name)
+ }
+
+ addr := fmt.Sprintf("unix://%s", c)
+ if p.Addr != addr {
+ t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr)
+ }
+ if p.TLSConfig.InsecureSkipVerify != true {
+ t.Fatalf("Expected TLS verification to be skipped")
+ }
+ l.Close()
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go
new file mode 100644
index 00000000000..7988471026d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go
@@ -0,0 +1,33 @@
+package plugins
+
+import (
+ "fmt"
+ "net/http"
+)
+
+type statusError struct {
+ status int
+ method string
+ err string
+}
+
+// Error returns a formatted string for this error type
+func (e *statusError) Error() string {
+ return fmt.Sprintf("%s: %v", e.method, e.err)
+}
+
+// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin
+func IsNotFound(err error) bool {
+ return isStatusError(err, http.StatusNotFound)
+}
+
+func isStatusError(err error, status int) bool {
+ if err == nil {
+ return false
+ }
+ e, ok := err.(*statusError)
+ if !ok {
+ return false
+ }
+ return e.status == status
+}
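A sketch of probing a failed call for a 404 via `IsNotFound`; the socket path and request shape are hypothetical, as in the client sketch above:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/plugins"
)

func main() {
	c, err := plugins.NewClient("unix:///run/docker/plugins/demo.sock", nil)
	if err != nil {
		panic(err)
	}
	type getRequest struct{ Name string }
	var resp struct{ Err string }
	if err := c.Call("VolumeDriver.Get", getRequest{Name: "vol1"}, &resp); err != nil {
		// IsNotFound unwraps the client's statusError and checks for
		// http.StatusNotFound, so callers need not inspect the type.
		fmt.Println("missing volume?", plugins.IsNotFound(err))
	}
}
```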
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md
new file mode 100644
index 00000000000..0418a3e00a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/README.md
@@ -0,0 +1,58 @@
+Plugin RPC Generator
+====================
+
+Generates go code from a Go interface definition for proxying between the plugin
+API and the subsystem being extended.
+
+## Usage
+
+Given an interface definition:
+
+```go
+type volumeDriver interface {
+ Create(name string, opts opts) (err error)
+ Remove(name string) (err error)
+ Path(name string) (mountpoint string, err error)
+ Mount(name string) (mountpoint string, err error)
+ Unmount(name string) (err error)
+}
+```
+
+**Note**: All function arguments and return values must be named in the definition.
+
+Run the generator:
+
+```bash
+$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go
+```
+
+Where:
+- `--type` is the name of the interface to use
+- `--name` is the subsystem that the plugin "Implements"
+- `-i` is the input file containing the interface definition
+- `-o` is the output file where the generated code should go
+
+**Note**: The generated code will use the same package name as the one defined in the input file
+
+Optionally, you can skip functions on the interface that should not be
+implemented in the generated proxy code by passing in the function name to `--skip`.
+This flag can be specified multiple times.
+
+You can also add build tags that should be prepended to the generated code by
+supplying `--tag`. This flag can be specified multiple times.
+
+## Known issues
+
+## go-generate
+
+You can also run this generator through go-generate.
+To do so, place the following directive at the top of the file that contains
+the interface definition (i.e., the input file):
+
+```go
+//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver
+```
+
+Then cd to the package dir and run `go generate`
+
+**Note**: the `pluginrpc-gen` binary must be within your `$PATH`
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go
new file mode 100644
index 00000000000..5695dcc2d41
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go
@@ -0,0 +1,89 @@
+package foo
+
+import (
+ "fmt"
+
+ aliasedio "io"
+
+ "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture"
+)
+
+var (
+ errFakeImport = fmt.Errorf("just to import fmt for imports tests")
+)
+
+type wobble struct {
+ Some string
+ Val string
+ Inception *wobble
+}
+
+// Fooer is an empty interface used for tests.
+type Fooer interface{}
+
+// Fooer2 is an interface used for tests.
+type Fooer2 interface {
+ Foo()
+}
+
+// Fooer3 is an interface used for tests.
+type Fooer3 interface {
+ Foo()
+ Bar(a string)
+ Baz(a string) (err error)
+ Qux(a, b string) (val string, err error)
+ Wobble() (w *wobble)
+ Wiggle() (w wobble)
+ WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship)
+}
+
+// Fooer4 is an interface used for tests.
+type Fooer4 interface {
+ Foo() error
+}
+
+// Bar is an interface used for tests.
+type Bar interface {
+ Boo(a string, b string) (s string, err error)
+}
+
+// Fooer5 is an interface used for tests.
+type Fooer5 interface {
+ Foo()
+ Bar
+}
+
+// Fooer6 is an interface used for tests.
+type Fooer6 interface {
+ Foo(a otherfixture.Spaceship)
+}
+
+// Fooer7 is an interface used for tests.
+type Fooer7 interface {
+ Foo(a *otherfixture.Spaceship)
+}
+
+// Fooer8 is an interface used for tests.
+type Fooer8 interface {
+ Foo(a map[string]otherfixture.Spaceship)
+}
+
+// Fooer9 is an interface used for tests.
+type Fooer9 interface {
+ Foo(a map[string]*otherfixture.Spaceship)
+}
+
+// Fooer10 is an interface used for tests.
+type Fooer10 interface {
+ Foo(a []otherfixture.Spaceship)
+}
+
+// Fooer11 is an interface used for tests.
+type Fooer11 interface {
+ Foo(a []*otherfixture.Spaceship)
+}
+
+// Fooer12 is an interface used for tests.
+type Fooer12 interface {
+ Foo(a aliasedio.Reader)
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go
new file mode 100644
index 00000000000..1937d1786ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go
@@ -0,0 +1,4 @@
+package otherfixture
+
+// Spaceship is a fixture for tests
+type Spaceship struct{}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go
new file mode 100644
index 00000000000..e77a7d45ff7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "os"
+ "unicode"
+ "unicode/utf8"
+)
+
+type stringSet struct {
+ values map[string]struct{}
+}
+
+func (s stringSet) String() string {
+ return ""
+}
+
+func (s stringSet) Set(value string) error {
+ s.values[value] = struct{}{}
+ return nil
+}
+func (s stringSet) GetValues() map[string]struct{} {
+ return s.values
+}
+
+var (
+ typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for")
+ rpcName = flag.String("name", *typeName, "RPC name, set if different from type")
+ inputFile = flag.String("i", "", "input file path")
+ outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path")
+
+ skipFuncs map[string]struct{}
+ flSkipFuncs = stringSet{make(map[string]struct{})}
+
+ flBuildTags = stringSet{make(map[string]struct{})}
+)
+
+func errorOut(msg string, err error) {
+ if err == nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err)
+ os.Exit(1)
+}
+
+func checkFlags() error {
+ if *outputFile == "" {
+ return fmt.Errorf("missing required flag `-o`")
+ }
+ if *inputFile == "" {
+ return fmt.Errorf("missing required flag `-i`")
+ }
+ return nil
+}
+
+func main() {
+ flag.Var(flSkipFuncs, "skip", "skip parsing for function")
+ flag.Var(flBuildTags, "tag", "build tags to add to generated files")
+ flag.Parse()
+ skipFuncs = flSkipFuncs.GetValues()
+
+ errorOut("error", checkFlags())
+
+ pkg, err := Parse(*inputFile, *typeName)
+ errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err)
+
+ var analysis = struct {
+ InterfaceType string
+ RPCName string
+ BuildTags map[string]struct{}
+ *ParsedPkg
+ }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg}
+ var buf bytes.Buffer
+
+ errorOut("parser error", generatedTempl.Execute(&buf, analysis))
+ src, err := format.Source(buf.Bytes())
+ errorOut("error formatting generated source:\n"+buf.String(), err)
+ errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644))
+}
+
+func toLower(s string) string {
+ if s == "" {
+ return ""
+ }
+ r, n := utf8.DecodeRuneInString(s)
+ return string(unicode.ToLower(r)) + s[n:]
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go
new file mode 100644
index 00000000000..6c547e18cf9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go
@@ -0,0 +1,263 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "path"
+ "reflect"
+ "strings"
+)
+
+var errBadReturn = errors.New("found return arg with no name: all args must be named")
+
+type errUnexpectedType struct {
+ expected string
+ actual interface{}
+}
+
+func (e errUnexpectedType) Error() string {
+ return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual))
+}
+
+// ParsedPkg holds information about a package that has been parsed,
+// its name and the list of functions.
+type ParsedPkg struct {
+ Name string
+ Functions []function
+ Imports []importSpec
+}
+
+type function struct {
+ Name string
+ Args []arg
+ Returns []arg
+ Doc string
+}
+
+type arg struct {
+ Name string
+ ArgType string
+ PackageSelector string
+}
+
+func (a *arg) String() string {
+ return a.Name + " " + a.ArgType
+}
+
+type importSpec struct {
+ Name string
+ Path string
+}
+
+func (s *importSpec) String() string {
+ var ss string
+ if len(s.Name) != 0 {
+ ss += s.Name
+ }
+ ss += s.Path
+ return ss
+}
+
+// Parse parses the given file for an interface definition with the given name.
+func Parse(filePath string, objName string) (*ParsedPkg, error) {
+ fs := token.NewFileSet()
+ pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors)
+ if err != nil {
+ return nil, err
+ }
+ p := &ParsedPkg{}
+ p.Name = pkg.Name.Name
+ obj, exists := pkg.Scope.Objects[objName]
+ if !exists {
+ return nil, fmt.Errorf("could not find object %s in %s", objName, filePath)
+ }
+ if obj.Kind != ast.Typ {
+ return nil, fmt.Errorf("expected type, got %s", obj.Kind)
+ }
+ spec, ok := obj.Decl.(*ast.TypeSpec)
+ if !ok {
+ return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl}
+ }
+ iface, ok := spec.Type.(*ast.InterfaceType)
+ if !ok {
+ return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type}
+ }
+
+ p.Functions, err = parseInterface(iface)
+ if err != nil {
+ return nil, err
+ }
+
+ // figure out what imports will be needed
+ imports := make(map[string]importSpec)
+ for _, f := range p.Functions {
+ args := append(f.Args, f.Returns...)
+ for _, arg := range args {
+ if len(arg.PackageSelector) == 0 {
+ continue
+ }
+
+ for _, i := range pkg.Imports {
+ if i.Name != nil {
+ if i.Name.Name != arg.PackageSelector {
+ continue
+ }
+ imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value}
+ break
+ }
+
+ _, name := path.Split(i.Path.Value)
+ splitName := strings.Split(name, "-")
+ if len(splitName) > 1 {
+ name = splitName[len(splitName)-1]
+ }
+ // import paths have quotes already added in, so need to remove them for name comparison
+ name = strings.TrimPrefix(name, `"`)
+ name = strings.TrimSuffix(name, `"`)
+ if name == arg.PackageSelector {
+ imports[i.Path.Value] = importSpec{Path: i.Path.Value}
+ break
+ }
+ }
+ }
+ }
+
+ for _, spec := range imports {
+ p.Imports = append(p.Imports, spec)
+ }
+
+ return p, nil
+}
+
+func parseInterface(iface *ast.InterfaceType) ([]function, error) {
+ var functions []function
+ for _, field := range iface.Methods.List {
+ switch f := field.Type.(type) {
+ case *ast.FuncType:
+ method, err := parseFunc(field)
+ if err != nil {
+ return nil, err
+ }
+ if method == nil {
+ continue
+ }
+ functions = append(functions, *method)
+ case *ast.Ident:
+ spec, ok := f.Obj.Decl.(*ast.TypeSpec)
+ if !ok {
+ return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl}
+ }
+ iface, ok := spec.Type.(*ast.InterfaceType)
+ if !ok {
+ return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type}
+ }
+ funcs, err := parseInterface(iface)
+ if err != nil {
+ fmt.Println(err)
+ continue
+ }
+ functions = append(functions, funcs...)
+ default:
+ return nil, errUnexpectedType{"*ast.FuncType or *ast.Ident", f}
+ }
+ }
+ return functions, nil
+}
+
+func parseFunc(field *ast.Field) (*function, error) {
+ f := field.Type.(*ast.FuncType)
+ method := &function{Name: field.Names[0].Name}
+ if _, exists := skipFuncs[method.Name]; exists {
+ fmt.Println("skipping:", method.Name)
+ return nil, nil
+ }
+ if f.Params != nil {
+ args, err := parseArgs(f.Params.List)
+ if err != nil {
+ return nil, err
+ }
+ method.Args = args
+ }
+ if f.Results != nil {
+ returns, err := parseArgs(f.Results.List)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err)
+ }
+ method.Returns = returns
+ }
+ return method, nil
+}
+
+func parseArgs(fields []*ast.Field) ([]arg, error) {
+ var args []arg
+ for _, f := range fields {
+ if len(f.Names) == 0 {
+ return nil, errBadReturn
+ }
+ for _, name := range f.Names {
+ p, err := parseExpr(f.Type)
+ if err != nil {
+ return nil, err
+ }
+ args = append(args, arg{name.Name, p.value, p.pkg})
+ }
+ }
+ return args, nil
+}
+
+type parsedExpr struct {
+ value string
+ pkg string
+}
+
+func parseExpr(e ast.Expr) (parsedExpr, error) {
+ var parsed parsedExpr
+ switch i := e.(type) {
+ case *ast.Ident:
+ parsed.value += i.Name
+ case *ast.StarExpr:
+ p, err := parseExpr(i.X)
+ if err != nil {
+ return parsed, err
+ }
+ parsed.value += "*"
+ parsed.value += p.value
+ parsed.pkg = p.pkg
+ case *ast.SelectorExpr:
+ p, err := parseExpr(i.X)
+ if err != nil {
+ return parsed, err
+ }
+ parsed.pkg = p.value
+ parsed.value += p.value + "."
+ parsed.value += i.Sel.Name
+ case *ast.MapType:
+ parsed.value += "map["
+ p, err := parseExpr(i.Key)
+ if err != nil {
+ return parsed, err
+ }
+ parsed.value += p.value
+ parsed.value += "]"
+ p, err = parseExpr(i.Value)
+ if err != nil {
+ return parsed, err
+ }
+ parsed.value += p.value
+ parsed.pkg = p.pkg
+ case *ast.ArrayType:
+ parsed.value += "[]"
+ p, err := parseExpr(i.Elt)
+ if err != nil {
+ return parsed, err
+ }
+ parsed.value += p.value
+ parsed.pkg = p.pkg
+ default:
+ return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i}
+ }
+ return parsed, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go
new file mode 100644
index 00000000000..a1b1ac9567f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser_test.go
@@ -0,0 +1,222 @@
+package main
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+const testFixture = "fixtures/foo.go"
+
+func TestParseEmptyInterface(t *testing.T) {
+ pkg, err := Parse(testFixture, "Fooer")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertName(t, "foo", pkg.Name)
+ assertNum(t, 0, len(pkg.Functions))
+}
+
+func TestParseNonInterfaceType(t *testing.T) {
+ _, err := Parse(testFixture, "wobble")
+ if _, ok := err.(errUnexpectedType); !ok {
+ t.Fatal("expected type error when parsing non-interface type")
+ }
+}
+
+func TestParseWithOneFunction(t *testing.T) {
+ pkg, err := Parse(testFixture, "Fooer2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertName(t, "foo", pkg.Name)
+ assertNum(t, 1, len(pkg.Functions))
+ assertName(t, "Foo", pkg.Functions[0].Name)
+ assertNum(t, 0, len(pkg.Functions[0].Args))
+ assertNum(t, 0, len(pkg.Functions[0].Returns))
+}
+
+func TestParseWithMultipleFuncs(t *testing.T) {
+ pkg, err := Parse(testFixture, "Fooer3")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertName(t, "foo", pkg.Name)
+ assertNum(t, 7, len(pkg.Functions))
+
+ f := pkg.Functions[0]
+ assertName(t, "Foo", f.Name)
+ assertNum(t, 0, len(f.Args))
+ assertNum(t, 0, len(f.Returns))
+
+ f = pkg.Functions[1]
+ assertName(t, "Bar", f.Name)
+ assertNum(t, 1, len(f.Args))
+ assertNum(t, 0, len(f.Returns))
+ arg := f.Args[0]
+ assertName(t, "a", arg.Name)
+ assertName(t, "string", arg.ArgType)
+
+ f = pkg.Functions[2]
+ assertName(t, "Baz", f.Name)
+ assertNum(t, 1, len(f.Args))
+ assertNum(t, 1, len(f.Returns))
+ arg = f.Args[0]
+ assertName(t, "a", arg.Name)
+ assertName(t, "string", arg.ArgType)
+ arg = f.Returns[0]
+ assertName(t, "err", arg.Name)
+ assertName(t, "error", arg.ArgType)
+
+ f = pkg.Functions[3]
+ assertName(t, "Qux", f.Name)
+ assertNum(t, 2, len(f.Args))
+ assertNum(t, 2, len(f.Returns))
+ arg = f.Args[0]
+ assertName(t, "a", f.Args[0].Name)
+ assertName(t, "string", f.Args[0].ArgType)
+ arg = f.Args[1]
+ assertName(t, "b", arg.Name)
+ assertName(t, "string", arg.ArgType)
+ arg = f.Returns[0]
+ assertName(t, "val", arg.Name)
+ assertName(t, "string", arg.ArgType)
+ arg = f.Returns[1]
+ assertName(t, "err", arg.Name)
+ assertName(t, "error", arg.ArgType)
+
+ f = pkg.Functions[4]
+ assertName(t, "Wobble", f.Name)
+ assertNum(t, 0, len(f.Args))
+ assertNum(t, 1, len(f.Returns))
+ arg = f.Returns[0]
+ assertName(t, "w", arg.Name)
+ assertName(t, "*wobble", arg.ArgType)
+
+ f = pkg.Functions[5]
+ assertName(t, "Wiggle", f.Name)
+ assertNum(t, 0, len(f.Args))
+ assertNum(t, 1, len(f.Returns))
+ arg = f.Returns[0]
+ assertName(t, "w", arg.Name)
+ assertName(t, "wobble", arg.ArgType)
+
+ f = pkg.Functions[6]
+ assertName(t, "WiggleWobble", f.Name)
+ assertNum(t, 6, len(f.Args))
+ assertNum(t, 6, len(f.Returns))
+ expectedArgs := [][]string{
+ {"a", "[]*wobble"},
+ {"b", "[]wobble"},
+ {"c", "map[string]*wobble"},
+ {"d", "map[*wobble]wobble"},
+ {"e", "map[string][]wobble"},
+ {"f", "[]*otherfixture.Spaceship"},
+ }
+ for i, arg := range f.Args {
+ assertName(t, expectedArgs[i][0], arg.Name)
+ assertName(t, expectedArgs[i][1], arg.ArgType)
+ }
+ expectedReturns := [][]string{
+ {"g", "map[*wobble]wobble"},
+ {"h", "[][]*wobble"},
+ {"i", "otherfixture.Spaceship"},
+ {"j", "*otherfixture.Spaceship"},
+ {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"},
+ {"l", "[]otherfixture.Spaceship"},
+ }
+ for i, ret := range f.Returns {
+ assertName(t, expectedReturns[i][0], ret.Name)
+ assertName(t, expectedReturns[i][1], ret.ArgType)
+ }
+}
+
+func TestParseWithUnnamedReturn(t *testing.T) {
+ _, err := Parse(testFixture, "Fooer4")
+ if err == nil || !strings.HasSuffix(err.Error(), errBadReturn.Error()) {
+ t.Fatalf("expected errBadReturn, got %v", err)
+ }
+}
+
+func TestEmbeddedInterface(t *testing.T) {
+ pkg, err := Parse(testFixture, "Fooer5")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertName(t, "foo", pkg.Name)
+ assertNum(t, 2, len(pkg.Functions))
+
+ f := pkg.Functions[0]
+ assertName(t, "Foo", f.Name)
+ assertNum(t, 0, len(f.Args))
+ assertNum(t, 0, len(f.Returns))
+
+ f = pkg.Functions[1]
+ assertName(t, "Boo", f.Name)
+ assertNum(t, 2, len(f.Args))
+ assertNum(t, 2, len(f.Returns))
+
+ arg := f.Args[0]
+ assertName(t, "a", arg.Name)
+ assertName(t, "string", arg.ArgType)
+
+ arg = f.Args[1]
+ assertName(t, "b", arg.Name)
+ assertName(t, "string", arg.ArgType)
+
+ arg = f.Returns[0]
+ assertName(t, "s", arg.Name)
+ assertName(t, "string", arg.ArgType)
+
+ arg = f.Returns[1]
+ assertName(t, "err", arg.Name)
+ assertName(t, "error", arg.ArgType)
+}
+
+func TestParsedImports(t *testing.T) {
+ cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"}
+ for _, testCase := range cases {
+ pkg, err := Parse(testFixture, testCase)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertNum(t, 1, len(pkg.Imports))
+ importPath := strings.Split(pkg.Imports[0].Path, "/")
+ assertName(t, "otherfixture\"", importPath[len(importPath)-1])
+ assertName(t, "", pkg.Imports[0].Name)
+ }
+}
+
+func TestAliasedImports(t *testing.T) {
+ pkg, err := Parse(testFixture, "Fooer12")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ assertNum(t, 1, len(pkg.Imports))
+ assertName(t, "aliasedio", pkg.Imports[0].Name)
+}
+
+func assertName(t *testing.T, expected, actual string) {
+ if expected != actual {
+ fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual))
+ }
+}
+
+func assertNum(t *testing.T, expected, actual int) {
+ if expected != actual {
+ fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual))
+ }
+}
+
+func fatalOut(t *testing.T, msg string) {
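+	// Caller(2) skips fatalOut and the assert helper that called it, so the
+	// reported file:line points at the failing assertion in the test.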
+ _, file, ln, _ := runtime.Caller(2)
+ t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg)
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go
new file mode 100644
index 00000000000..50ed9293c11
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go
@@ -0,0 +1,118 @@
+package main
+
+import (
+ "strings"
+ "text/template"
+)
+
+func printArgs(args []arg) string {
+ var argStr []string
+ for _, arg := range args {
+ argStr = append(argStr, arg.String())
+ }
+ return strings.Join(argStr, ", ")
+}
+
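+// buildImports renders the import block for a generated file. "errors" is
+// always imported because the generated proxies use errors.New to rebuild
+// error values from their string encoding (see marshalType below).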
+func buildImports(specs []importSpec) string {
+ if len(specs) == 0 {
+ return `import "errors"`
+ }
+ imports := "import(\n"
+ imports += "\t\"errors\"\n"
+ for _, i := range specs {
+ imports += "\t" + i.String() + "\n"
+ }
+ imports += ")"
+ return imports
+}
+
+func marshalType(t string) string {
+ switch t {
+ case "error":
+ // convert error types to plain strings to ensure the values are encoded/decoded properly
+ return "string"
+ default:
+ return t
+ }
+}
+
+func isErr(t string) bool {
+ switch t {
+ case "error":
+ return true
+ default:
+ return false
+ }
+}
+
+// buildTag renders a "+build" constraint line; it lives in a helper because
+// go vet takes issue with the tag text appearing directly in the template.
+func buildTag(s string) string {
+ return "+build " + s
+}
+
+var templFuncs = template.FuncMap{
+ "printArgs": printArgs,
+ "marshalType": marshalType,
+ "isErr": isErr,
+ "lower": strings.ToLower,
+ "title": title,
+ "tag": buildTag,
+ "imports": buildImports,
+}
+
+func title(s string) string {
+ if strings.ToLower(s) == "id" {
+ return "ID"
+ }
+ return strings.Title(s)
+}
+
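+// generatedTempl renders, for each method of the parsed interface, a request
+// struct, a response struct, and a proxy method that round-trips them through
+// client.Call. As a rough sketch, a method "Foo(a string) (err error)" on an
+// interface type Fooer would generate approximately:
+//
+//	type FooerProxyFooRequest struct{ A string }
+//	type FooerProxyFooResponse struct{ Err string }
+//	func (pp *FooerProxy) Foo(a string) (err error) { ... }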
+var generatedTempl = template.Must(template.New("rpc_client").Funcs(templFuncs).Parse(`
+// generated code - DO NOT EDIT
+{{ range $k, $v := .BuildTags }}
+ // {{ tag $k }} {{ end }}
+
+package {{ .Name }}
+
+{{ imports .Imports }}
+
+type client interface{
+ Call(string, interface{}, interface{}) error
+}
+
+type {{ .InterfaceType }}Proxy struct {
+ client
+}
+
+{{ range .Functions }}
+ type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{
+ {{ range .Args }}
+ {{ title .Name }} {{ .ArgType }} {{ end }}
+ }
+
+ type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{
+ {{ range .Returns }}
+ {{ title .Name }} {{ marshalType .ArgType }} {{ end }}
+ }
+
+ func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) {
+ var(
+ req {{ $.InterfaceType }}Proxy{{ .Name }}Request
+ ret {{ $.InterfaceType }}Proxy{{ .Name }}Response
+ )
+ {{ range .Args }}
+ req.{{ title .Name }} = {{ lower .Name }} {{ end }}
+ if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil {
+ return
+ }
+ {{ range $r := .Returns }}
+ {{ if isErr .ArgType }}
+ if ret.{{ title .Name }} != "" {
+ {{ lower .Name }} = errors.New(ret.{{ title .Name }})
+ } {{ end }}
+ {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }}
+
+ return
+ }
+{{ end }}
+`))
diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
new file mode 100644
index 00000000000..debcd087c9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
@@ -0,0 +1,274 @@
+// Package plugins provides structures and helper functions to manage Docker
+// plugins.
+//
+// Docker discovers plugins by looking for them in the plugin directory whenever
+// a user or container tries to use one by name. UNIX domain socket files must
+// be located under /run/docker/plugins, whereas spec files can be located
+// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled
+// by the Registry interface, which lets you list all plugins or get a plugin by
+// its name if it exists.
+//
+// The plugins need to implement an HTTP server and bind this to the UNIX socket
+// or the address specified in the spec files.
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of Docker subsystems which this plugin implements.
+//
+// To use a plugin, call ``Get`` with the name of the plugin and the
+// subsystem it implements.
+//
+// plugin, err := plugins.Get("example", "VolumeDriver")
+// if err != nil {
+// return fmt.Errorf("Error looking up volume plugin example: %v", err)
+// }
+package plugins
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/go-connections/tlsconfig"
+)
+
+var (
+ // ErrNotImplements is returned if the plugin does not implement the requested driver.
+ ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+ sync.Mutex
+ plugins map[string]*Plugin
+}
+
+var (
+ storage = plugins{plugins: make(map[string]*Plugin)}
+ extpointHandlers = make(map[string]func(string, *Client))
+)
+
+// Manifest lists what a plugin implements.
+type Manifest struct {
+	// List of subsystems the plugin implements.
+ Implements []string
+}
+
+// Plugin is the definition of a docker plugin.
+type Plugin struct {
+ // Name of the plugin
+ name string
+ // Address of the plugin
+ Addr string
+ // TLS configuration of the plugin
+ TLSConfig *tlsconfig.Options
+ // Client attached to the plugin
+ client *Client
+ // Manifest of the plugin (see above)
+ Manifest *Manifest `json:"-"`
+
+ // error produced by activation
+ activateErr error
+	// specifies whether the activation sequence has completed (regardless of success)
+ activated bool
+ // wait for activation to finish
+ activateWait *sync.Cond
+}
+
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+ return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+ return p.client
+}
+
+// IsLegacy returns true for legacy plugins and false otherwise.
+func (p *Plugin) IsLegacy() bool {
+ return true
+}
+
+// NewLocalPlugin creates a new local plugin.
+func NewLocalPlugin(name, addr string) *Plugin {
+ return &Plugin{
+ name: name,
+ Addr: addr,
+ // TODO: change to nil
+ TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true},
+ activateWait: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (p *Plugin) activate() error {
+ p.activateWait.L.Lock()
+ if p.activated {
+ p.activateWait.L.Unlock()
+ return p.activateErr
+ }
+
+ p.activateErr = p.activateWithLock()
+ p.activated = true
+
+ p.activateWait.L.Unlock()
+ p.activateWait.Broadcast()
+ return p.activateErr
+}
+
+func (p *Plugin) activateWithLock() error {
+ c, err := NewClient(p.Addr, p.TLSConfig)
+ if err != nil {
+ return err
+ }
+ p.client = c
+
+ m := new(Manifest)
+ if err = p.client.Call("Plugin.Activate", nil, m); err != nil {
+ return err
+ }
+
+ p.Manifest = m
+
+ for _, iface := range m.Implements {
+ handler, handled := extpointHandlers[iface]
+ if !handled {
+ continue
+ }
+ handler(p.name, p.client)
+ }
+ return nil
+}
+
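+// waitActive blocks until the activation sequence has completed (successfully
+// or not) and returns the activation error, if any.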
+func (p *Plugin) waitActive() error {
+ p.activateWait.L.Lock()
+ for !p.activated {
+ p.activateWait.Wait()
+ }
+ p.activateWait.L.Unlock()
+ return p.activateErr
+}
+
+func (p *Plugin) implements(kind string) bool {
+ if err := p.waitActive(); err != nil {
+ return false
+ }
+ for _, driver := range p.Manifest.Implements {
+ if driver == kind {
+ return true
+ }
+ }
+ return false
+}
+
+func load(name string) (*Plugin, error) {
+ return loadWithRetry(name, true)
+}
+
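+// loadWithRetry resolves the plugin through the local registry; when retry is
+// set, it polls with backoff (see backoff/abort elsewhere in this package)
+// until the plugin appears or the retry window is exhausted.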
+func loadWithRetry(name string, retry bool) (*Plugin, error) {
+ registry := newLocalRegistry()
+ start := time.Now()
+
+ var retries int
+ for {
+ pl, err := registry.Plugin(name)
+ if err != nil {
+ if !retry {
+ return nil, err
+ }
+
+ timeOff := backoff(retries)
+ if abort(start, timeOff) {
+ return nil, err
+ }
+ retries++
+ logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff)
+ time.Sleep(timeOff)
+ continue
+ }
+
+ storage.Lock()
+ storage.plugins[name] = pl
+ storage.Unlock()
+
+ err = pl.activate()
+
+ if err != nil {
+ storage.Lock()
+ delete(storage.plugins, name)
+ storage.Unlock()
+ }
+
+ return pl, err
+ }
+}
+
+func get(name string) (*Plugin, error) {
+ storage.Lock()
+ pl, ok := storage.plugins[name]
+ storage.Unlock()
+ if ok {
+ return pl, pl.activate()
+ }
+ return load(name)
+}
+
+// Get returns the plugin given the specified name and requested implementation.
+func Get(name, imp string) (*Plugin, error) {
+ pl, err := get(name)
+ if err != nil {
+ return nil, err
+ }
+ if pl.implements(imp) {
+ logrus.Debugf("%s implements: %s", name, imp)
+ return pl, nil
+ }
+ return nil, ErrNotImplements
+}
+
+// Handle adds the specified function to the extpointHandlers.
+func Handle(iface string, fn func(string, *Client)) {
+ extpointHandlers[iface] = fn
+}
+
+// GetAll returns all the plugins for the specified implementation
+func GetAll(imp string) ([]*Plugin, error) {
+ pluginNames, err := Scan()
+ if err != nil {
+ return nil, err
+ }
+
+ type plLoad struct {
+ pl *Plugin
+ err error
+ }
+
+ chPl := make(chan *plLoad, len(pluginNames))
+ var wg sync.WaitGroup
+ for _, name := range pluginNames {
+ if pl, ok := storage.plugins[name]; ok {
+ chPl <- &plLoad{pl, nil}
+ continue
+ }
+
+ wg.Add(1)
+ go func(name string) {
+ defer wg.Done()
+ pl, err := loadWithRetry(name, false)
+ chPl <- &plLoad{pl, err}
+ }(name)
+ }
+
+ wg.Wait()
+ close(chPl)
+
+ var out []*Plugin
+ for pl := range chPl {
+ if pl.err != nil {
+ logrus.Error(pl.err)
+ continue
+ }
+ if pl.pl.implements(imp) {
+ out = append(out, pl.pl)
+ }
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go
new file mode 100644
index 00000000000..5be146af657
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go
@@ -0,0 +1,36 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+)
+
+// httpTransport holds an http.RoundTripper
+// and information about the scheme and address the transport
+// sends request to.
+type httpTransport struct {
+ http.RoundTripper
+ scheme string
+ addr string
+}
+
+// NewHTTPTransport creates a new httpTransport.
+func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport {
+ return httpTransport{
+ RoundTripper: r,
+ scheme: scheme,
+ addr: addr,
+ }
+}
+
+// NewRequest creates a new http.Request and sets the URL
+// scheme and address with the transport's fields.
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) {
+ req, err := newHTTPRequest(path, data)
+ if err != nil {
+ return nil, err
+ }
+ req.URL.Scheme = t.scheme
+ req.URL.Host = t.addr
+ return req, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go
new file mode 100644
index 00000000000..d7f1e2100c4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go
@@ -0,0 +1,36 @@
+package transport
+
+import (
+ "io"
+ "net/http"
+ "strings"
+)
+
+// VersionMimetype is the Content-Type the engine sends to plugins.
+const VersionMimetype = "application/vnd.docker.plugins.v1.2+json"
+
+// RequestFactory defines an interface that
+// transports can implement to create new requests.
+type RequestFactory interface {
+ NewRequest(path string, data io.Reader) (*http.Request, error)
+}
+
+// Transport defines an interface that plugin transports
+// must implement.
+type Transport interface {
+ http.RoundTripper
+ RequestFactory
+}
+
+// newHTTPRequest creates a new request with a path and a body.
+func newHTTPRequest(path string, data io.Reader) (*http.Request, error) {
+ if !strings.HasPrefix(path, "/") {
+ path = "/" + path
+ }
+ req, err := http.NewRequest("POST", path, data)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Add("Accept", VersionMimetype)
+ return req, nil
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go
similarity index 95%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go
rename to vendor/github.com/docker/docker/pkg/pools/pools.go
index 515fb4d0508..6f5988e2677 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go
+++ b/vendor/github.com/docker/docker/pkg/pools/pools.go
@@ -14,7 +14,7 @@ import (
"io"
"sync"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
+ "github.com/docker/docker/pkg/ioutils"
)
var (
@@ -28,7 +28,7 @@ const buffer32K = 32 * 1024
// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
- pool sync.Pool
+ pool *sync.Pool
}
func init() {
@@ -39,7 +39,7 @@ func init() {
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
- pool := sync.Pool{
+ pool := &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
@@ -80,13 +80,13 @@ func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Rea
// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
- pool sync.Pool
+ pool *sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
- pool := sync.Pool{
+ pool := &sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}
diff --git a/vendor/github.com/docker/docker/pkg/pools/pools_test.go b/vendor/github.com/docker/docker/pkg/pools/pools_test.go
new file mode 100644
index 00000000000..1661b780c90
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pools/pools_test.go
@@ -0,0 +1,161 @@
+package pools
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "strings"
+ "testing"
+)
+
+func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+ reader := BufioReader32KPool.Get(nil)
+ if reader == nil {
+		t.Fatalf("BufioReaderPool should have created a bufio.Reader but did not.")
+ }
+}
+
+func TestBufioReaderPoolPutAndGet(t *testing.T) {
+ sr := bufio.NewReader(strings.NewReader("foobar"))
+ reader := BufioReader32KPool.Get(sr)
+ if reader == nil {
+ t.Fatalf("BufioReaderPool should not return a nil reader.")
+ }
+	// verify the first 3 bytes
+ buf1 := make([]byte, 3)
+ _, err := reader.Read(buf1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf1); actual != "foo" {
+		t.Fatalf("The first 3 bytes should have been 'foo' but were %v", actual)
+ }
+ BufioReader32KPool.Put(reader)
+ // Try to read the next 3 bytes
+ _, err = sr.Read(make([]byte, 3))
+	if err != io.EOF {
+		t.Fatalf("The underlying reader should have been drained and returned io.EOF, got %v", err)
+ }
+}
+
+type simpleReaderCloser struct {
+ io.Reader
+ closed bool
+}
+
+func (r *simpleReaderCloser) Close() error {
+ r.closed = true
+ return nil
+}
+
+func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) {
+ br := bufio.NewReader(strings.NewReader(""))
+ sr := &simpleReaderCloser{
+ Reader: strings.NewReader("foobar"),
+ closed: false,
+ }
+ reader := BufioReader32KPool.NewReadCloserWrapper(br, sr)
+ if reader == nil {
+ t.Fatalf("NewReadCloserWrapper should not return a nil reader.")
+ }
+ // Verify the content of reader
+ buf := make([]byte, 3)
+ _, err := reader.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf); actual != "foo" {
+		t.Fatalf("The first 3 letters should have been 'foo' but were %v", actual)
+ }
+ reader.Close()
+ // Read 3 more bytes "bar"
+ _, err = reader.Read(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if actual := string(buf); actual != "bar" {
+		t.Fatalf("The next 3 letters should have been 'bar' but were %v", actual)
+ }
+ if !sr.closed {
+		t.Fatalf("The ReadCloser should have been closed, but it was not.")
+ }
+}
+
+func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
+ writer := BufioWriter32KPool.Get(nil)
+ if writer == nil {
+		t.Fatalf("BufioWriterPool should have created a bufio.Writer but did not.")
+ }
+}
+
+func TestBufioWriterPoolPutAndGet(t *testing.T) {
+ buf := new(bytes.Buffer)
+ bw := bufio.NewWriter(buf)
+ writer := BufioWriter32KPool.Get(bw)
+ if writer == nil {
+		t.Fatalf("BufioWriterPool should not return a nil writer.")
+ }
+ written, err := writer.Write([]byte("foobar"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if written != 6 {
+ t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+ }
+	// Flush both the pooled writer and the underlying bufio.Writer so the data reaches buf
+ writer.Flush()
+ bw.Flush()
+ if len(buf.Bytes()) != 6 {
+ t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
+ }
+ // Reset the buffer
+ buf.Reset()
+ BufioWriter32KPool.Put(writer)
+ // Try to write something
+ if _, err = writer.Write([]byte("barfoo")); err != nil {
+ t.Fatal(err)
+ }
+	// Flushing the writer after Put should panic (its underlying writer was reset to nil);
+	// recover to verify the panic occurs
+ defer func() {
+ if r := recover(); r == nil {
+			t.Fatal("Trying to flush the writer should have panicked, but did not.")
+ }
+ }()
+ writer.Flush()
+}
+
+type simpleWriterCloser struct {
+ io.Writer
+ closed bool
+}
+
+func (r *simpleWriterCloser) Close() error {
+ r.closed = true
+ return nil
+}
+
+func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) {
+ buf := new(bytes.Buffer)
+ bw := bufio.NewWriter(buf)
+ sw := &simpleWriterCloser{
+ Writer: new(bytes.Buffer),
+ closed: false,
+ }
+ bw.Flush()
+ writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw)
+ if writer == nil {
+		t.Fatalf("NewWriteCloserWrapper should not return a nil writer.")
+ }
+ written, err := writer.Write([]byte("foobar"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if written != 6 {
+ t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
+ }
+ writer.Close()
+ if !sw.closed {
+		t.Fatalf("The WriteCloser should have been closed, but it was not.")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go
new file mode 100644
index 00000000000..df3c2ba91a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/progress/progress.go
@@ -0,0 +1,73 @@
+package progress
+
+import (
+ "fmt"
+)
+
+// Progress represents the progress of a transfer.
+type Progress struct {
+ ID string
+
+ // Progress contains a Message or...
+ Message string
+
+ // ...progress of an action
+ Action string
+ Current int64
+ Total int64
+
+ // Aux contains extra information not presented to the user, such as
+ // digests for push signing.
+ Aux interface{}
+
+ LastUpdate bool
+}
+
+// Output is an interface for writing progress information. It's
+// like a writer for progress, but we don't call it Writer because
+// that would be confusing next to ProgressReader (also, because it
+// doesn't implement the io.Writer interface).
+type Output interface {
+ WriteProgress(Progress) error
+}
+
+type chanOutput chan<- Progress
+
+func (out chanOutput) WriteProgress(p Progress) error {
+ out <- p
+ return nil
+}
+
+// ChanOutput returns an Output that writes progress updates to the
+// supplied channel.
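+// A minimal usage sketch:
+//
+//	progressChan := make(chan Progress, 100)
+//	out := ChanOutput(progressChan)
+//	go func() {
+//		for p := range progressChan {
+//			fmt.Println(p.ID, p.Action)
+//		}
+//	}()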
+func ChanOutput(progressChan chan<- Progress) Output {
+ return chanOutput(progressChan)
+}
+
+// Update is a convenience function to write a progress update to the channel.
+func Update(out Output, id, action string) {
+ out.WriteProgress(Progress{ID: id, Action: action})
+}
+
+// Updatef is a convenience function to write a printf-formatted progress update
+// to the channel.
+func Updatef(out Output, id, format string, a ...interface{}) {
+ Update(out, id, fmt.Sprintf(format, a...))
+}
+
+// Message is a convenience function to write a progress message to the channel.
+func Message(out Output, id, message string) {
+ out.WriteProgress(Progress{ID: id, Message: message})
+}
+
+// Messagef is a convenience function to write a printf-formatted progress
+// message to the channel.
+func Messagef(out Output, id, format string, a ...interface{}) {
+ Message(out, id, fmt.Sprintf(format, a...))
+}
+
+// Aux sends auxiliary information over a progress interface, which will not be
+// formatted for the UI. This is used for things such as push signing.
+func Aux(out Output, a interface{}) {
+ out.WriteProgress(Progress{Aux: a})
+}
diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go
new file mode 100644
index 00000000000..c39e2b69fb2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go
@@ -0,0 +1,59 @@
+package progress
+
+import (
+ "io"
+)
+
+// Reader is an io.ReadCloser that emits progress updates as it is read.
+type Reader struct {
+ in io.ReadCloser // Stream to read from
+ out Output // Where to send progress bar to
+ size int64
+ current int64
+ lastUpdate int64
+ id string
+ action string
+}
+
+// NewProgressReader creates a new progress Reader.
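+// A usage sketch (resp is a hypothetical *http.Response, out an Output):
+//
+//	pr := NewProgressReader(resp.Body, out, resp.ContentLength, "layer-id", "Downloading")
+//	defer pr.Close()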
+func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader {
+ return &Reader{
+ in: in,
+ out: out,
+ size: size,
+ id: id,
+ action: action,
+ }
+}
+
+func (p *Reader) Read(buf []byte) (n int, err error) {
+ read, err := p.in.Read(buf)
+ p.current += int64(read)
+	updateEvery := int64(1024 * 512) // 512kB
+ if p.size > 0 {
+ // Update progress for every 1% read if 1% < 512kB
+ if increment := int64(0.01 * float64(p.size)); increment < updateEvery {
+ updateEvery = increment
+ }
+ }
+ if p.current-p.lastUpdate > updateEvery || err != nil {
+ p.updateProgress(err != nil && read == 0)
+ p.lastUpdate = p.current
+ }
+
+ return read, err
+}
+
+// Close closes the progress reader and its underlying reader.
+func (p *Reader) Close() error {
+ if p.current < p.size {
+ // print a full progress bar when closing prematurely
+ p.current = p.size
+ p.updateProgress(false)
+ }
+ return p.in.Close()
+}
+
+func (p *Reader) updateProgress(last bool) {
+ p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last})
+}
diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go
new file mode 100644
index 00000000000..b14d4015614
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/progress/progressreader_test.go
@@ -0,0 +1,75 @@
+package progress
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestOutputOnPrematureClose(t *testing.T) {
+ content := []byte("TESTING")
+ reader := ioutil.NopCloser(bytes.NewReader(content))
+ progressChan := make(chan Progress, 10)
+
+ pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read")
+
+ part := make([]byte, 4, 4)
+ _, err := io.ReadFull(pr, part)
+ if err != nil {
+ pr.Close()
+ t.Fatal(err)
+ }
+
+drainLoop:
+ for {
+ select {
+ case <-progressChan:
+ default:
+ break drainLoop
+ }
+ }
+
+ pr.Close()
+
+ select {
+ case <-progressChan:
+ default:
+ t.Fatalf("Expected some output when closing prematurely")
+ }
+}
+
+func TestCompleteSilently(t *testing.T) {
+ content := []byte("TESTING")
+ reader := ioutil.NopCloser(bytes.NewReader(content))
+ progressChan := make(chan Progress, 10)
+
+ pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read")
+
+ out, err := ioutil.ReadAll(pr)
+ if err != nil {
+ pr.Close()
+ t.Fatal(err)
+ }
+ if string(out) != "TESTING" {
+ pr.Close()
+ t.Fatalf("Unexpected output %q from reader", string(out))
+ }
+
+drainLoop:
+ for {
+ select {
+ case <-progressChan:
+ default:
+ break drainLoop
+ }
+ }
+
+ pr.Close()
+
+ select {
+ case <-progressChan:
+ t.Fatalf("Should have closed silently when read is complete")
+ default:
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/vendor/github.com/docker/docker/pkg/promise/promise.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go
rename to vendor/github.com/docker/docker/pkg/promise/promise.go
diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go
new file mode 100644
index 00000000000..09364617e4d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go
@@ -0,0 +1,111 @@
+package pubsub
+
+import (
+ "sync"
+ "time"
+)
+
+var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }}
+
+// NewPublisher creates a new pub/sub publisher to broadcast messages.
+// The duration is used as the send timeout so as not to block the publisher
+// from delivering messages to other clients when one client is slow or unresponsive.
+// The buffer is used when creating new channels for subscribers.
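+//
+// A minimal usage sketch:
+//
+//	p := NewPublisher(100*time.Millisecond, 10)
+//	sub := p.Subscribe()
+//	go func() { fmt.Println(<-sub) }()
+//	p.Publish("hello")
+//	p.Close()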
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher {
+ return &Publisher{
+ buffer: buffer,
+ timeout: publishTimeout,
+ subscribers: make(map[subscriber]topicFunc),
+ }
+}
+
+type subscriber chan interface{}
+type topicFunc func(v interface{}) bool
+
+// Publisher is a basic pub/sub structure. It allows sending events and
+// subscribing to them, and can be safely used from multiple goroutines.
+type Publisher struct {
+ m sync.RWMutex
+ buffer int
+ timeout time.Duration
+ subscribers map[subscriber]topicFunc
+}
+
+// Len returns the number of subscribers for the publisher
+func (p *Publisher) Len() int {
+ p.m.RLock()
+ i := len(p.subscribers)
+ p.m.RUnlock()
+ return i
+}
+
+// Subscribe adds a new subscriber to the publisher, returning the channel.
+func (p *Publisher) Subscribe() chan interface{} {
+ return p.SubscribeTopic(nil)
+}
+
+// SubscribeTopic adds a new subscriber that filters messages sent by a topic.
+func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} {
+ ch := make(chan interface{}, p.buffer)
+ p.m.Lock()
+ p.subscribers[ch] = topic
+ p.m.Unlock()
+ return ch
+}
+
+// Evict removes the specified subscriber from receiving any more messages.
+func (p *Publisher) Evict(sub chan interface{}) {
+ p.m.Lock()
+ delete(p.subscribers, sub)
+ close(sub)
+ p.m.Unlock()
+}
+
+// Publish sends the data in v to all subscribers currently registered with the publisher.
+func (p *Publisher) Publish(v interface{}) {
+ p.m.RLock()
+ if len(p.subscribers) == 0 {
+ p.m.RUnlock()
+ return
+ }
+
+ wg := wgPool.Get().(*sync.WaitGroup)
+ for sub, topic := range p.subscribers {
+ wg.Add(1)
+ go p.sendTopic(sub, topic, v, wg)
+ }
+ wg.Wait()
+ wgPool.Put(wg)
+ p.m.RUnlock()
+}
+
+// Close closes the channels to all subscribers registered with the publisher.
+func (p *Publisher) Close() {
+ p.m.Lock()
+ for sub := range p.subscribers {
+ delete(p.subscribers, sub)
+ close(sub)
+ }
+ p.m.Unlock()
+}
+
+func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) {
+ defer wg.Done()
+ if topic != nil && !topic(v) {
+ return
+ }
+
+	// send under a select so as not to block if the receiver is unavailable
+ if p.timeout > 0 {
+ select {
+ case sub <- v:
+ case <-time.After(p.timeout):
+ }
+ return
+ }
+
+ select {
+ case sub <- v:
+ default:
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go
new file mode 100644
index 00000000000..d6b0a1d59a2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher_test.go
@@ -0,0 +1,142 @@
+package pubsub
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestSendToOneSub(t *testing.T) {
+ p := NewPublisher(100*time.Millisecond, 10)
+ c := p.Subscribe()
+
+ p.Publish("hi")
+
+ msg := <-c
+ if msg.(string) != "hi" {
+ t.Fatalf("expected message hi but received %v", msg)
+ }
+}
+
+func TestSendToMultipleSubs(t *testing.T) {
+ p := NewPublisher(100*time.Millisecond, 10)
+ subs := []chan interface{}{}
+ subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe())
+
+ p.Publish("hi")
+
+ for _, c := range subs {
+ msg := <-c
+ if msg.(string) != "hi" {
+ t.Fatalf("expected message hi but received %v", msg)
+ }
+ }
+}
+
+func TestEvictOneSub(t *testing.T) {
+ p := NewPublisher(100*time.Millisecond, 10)
+ s1 := p.Subscribe()
+ s2 := p.Subscribe()
+
+ p.Evict(s1)
+ p.Publish("hi")
+ if _, ok := <-s1; ok {
+ t.Fatal("expected s1 to not receive the published message")
+ }
+
+ msg := <-s2
+ if msg.(string) != "hi" {
+ t.Fatalf("expected message hi but received %v", msg)
+ }
+}
+
+func TestClosePublisher(t *testing.T) {
+ p := NewPublisher(100*time.Millisecond, 10)
+ subs := []chan interface{}{}
+ subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe())
+ p.Close()
+
+ for _, c := range subs {
+ if _, ok := <-c; ok {
+ t.Fatal("expected all subscriber channels to be closed")
+ }
+ }
+}
+
+const sampleText = "test"
+
+type testSubscriber struct {
+ dataCh chan interface{}
+ ch chan error
+}
+
+func (s *testSubscriber) Wait() error {
+ return <-s.ch
+}
+
+func newTestSubscriber(p *Publisher) *testSubscriber {
+ ts := &testSubscriber{
+ dataCh: p.Subscribe(),
+ ch: make(chan error),
+ }
+ go func() {
+ for data := range ts.dataCh {
+ s, ok := data.(string)
+ if !ok {
+ ts.ch <- fmt.Errorf("Unexpected type %T", data)
+ break
+ }
+ if s != sampleText {
+ ts.ch <- fmt.Errorf("Unexpected text %s", s)
+ break
+ }
+ }
+ close(ts.ch)
+ }()
+ return ts
+}
+
+// for testing with -race
+func TestPubSubRace(t *testing.T) {
+ p := NewPublisher(0, 1024)
+ var subs [](*testSubscriber)
+ for j := 0; j < 50; j++ {
+ subs = append(subs, newTestSubscriber(p))
+ }
+ for j := 0; j < 1000; j++ {
+ p.Publish(sampleText)
+ }
+ time.AfterFunc(1*time.Second, func() {
+ for _, s := range subs {
+ p.Evict(s.dataCh)
+ }
+ })
+ for _, s := range subs {
+ s.Wait()
+ }
+}
+
+func BenchmarkPubSub(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ p := NewPublisher(0, 1024)
+ var subs [](*testSubscriber)
+ for j := 0; j < 50; j++ {
+ subs = append(subs, newTestSubscriber(p))
+ }
+ b.StartTimer()
+ for j := 0; j < 1000; j++ {
+ p.Publish(sampleText)
+ }
+ time.AfterFunc(1*time.Second, func() {
+ for _, s := range subs {
+ p.Evict(s.dataCh)
+ }
+ })
+ for _, s := range subs {
+ if err := s.Wait(); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/random/random.go b/vendor/github.com/docker/docker/pkg/random/random.go
new file mode 100644
index 00000000000..70de4d1304c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/random/random.go
@@ -0,0 +1,71 @@
+package random
+
+import (
+ cryptorand "crypto/rand"
+ "io"
+ "math"
+ "math/big"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// Rand is a global *rand.Rand instance, initialized with a NewSource() source.
+var Rand = rand.New(NewSource())
+
+// Reader is a global, shared instance of a pseudorandom bytes generator.
+// It doesn't consume entropy.
+var Reader io.Reader = &reader{rnd: Rand}
+
+// copypaste from standard math/rand
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// NewSource returns a math/rand.Source safe for concurrent use, seeded from
+// crypto/rand when available, falling back to the current unix-nano timestamp.
+func NewSource() rand.Source {
+ var seed int64
+ if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil {
+ // This should not happen, but worst-case fallback to time-based seed.
+ seed = time.Now().UnixNano()
+ } else {
+ seed = cryptoseed.Int64()
+ }
+ return &lockedSource{
+ src: rand.NewSource(seed),
+ }
+}
+
+type reader struct {
+ rnd *rand.Rand
+}
+
+func (r *reader) Read(b []byte) (int, error) {
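+	// Slice each 63-bit pseudorandom value into bytes, drawing a new value
+	// whenever the current one is exhausted, until b is full.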
+ i := 0
+ for {
+ val := r.rnd.Int63()
+ for val > 0 {
+ b[i] = byte(val)
+ i++
+ if i == len(b) {
+ return i, nil
+ }
+ val >>= 8
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/random/random_test.go b/vendor/github.com/docker/docker/pkg/random/random_test.go
new file mode 100644
index 00000000000..cf405f78cbd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/random/random_test.go
@@ -0,0 +1,22 @@
+package random
+
+import (
+ "math/rand"
+ "sync"
+ "testing"
+)
+
+// for go test -v -race
+func TestConcurrency(t *testing.T) {
+ rnd := rand.New(NewSource())
+ var wg sync.WaitGroup
+
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ rnd.Int63()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md
new file mode 100644
index 00000000000..45592ce85a8
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+## reexec
+
+The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require
+because of Go's forking limitations. Handlers are registered under a name, and the argv[0] of
+the re-executed binary is used to look up and run the matching init function.
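+
+A minimal usage sketch (the handler name "my-init" is illustrative):
+
+	func init() {
+		reexec.Register("my-init", func() {
+			// runs when the binary is re-executed with argv[0] == "my-init"
+		})
+	}
+
+	func main() {
+		if reexec.Init() {
+			// a registered initializer ran; skip normal startup
+			return
+		}
+		// normal startup path
+	}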
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
new file mode 100644
index 00000000000..3c3a73a9d57
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
@@ -0,0 +1,28 @@
+// +build linux
+
+package reexec
+
+import (
+ "os/exec"
+ "syscall"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+ return "/proc/self/exe"
+}
+
+// Command returns an *exec.Cmd whose Path is the current binary, with
+// SysProcAttr.Pdeathsig set to SIGTERM.
+// This will use the in-memory version (/proc/self/exe) of the current binary;
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ SysProcAttr: &syscall.SysProcAttr{
+ Pdeathsig: syscall.SIGTERM,
+ },
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
new file mode 100644
index 00000000000..b70edcb316b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
@@ -0,0 +1,23 @@
+// +build freebsd solaris
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns an *exec.Cmd whose Path is the current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
new file mode 100644
index 00000000000..9aed004e864
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!solaris
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Command is unsupported on operating systems other than Linux, Windows,
+// FreeBSD and Solaris.
+func Command(args ...string) *exec.Cmd {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
new file mode 100644
index 00000000000..8d65e0ae1ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package reexec
+
+import (
+ "os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+ return naiveSelf()
+}
+
+// Command returns an *exec.Cmd whose Path is the current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+ return &exec.Cmd{
+ Path: Self(),
+ Args: args,
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
new file mode 100644
index 00000000000..c56671d9192
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name
+func Register(name string, initializer func()) {
+ if _, exists := registeredInitializers[name]; exists {
+ panic(fmt.Sprintf("reexec func already registered under name %q", name))
+ }
+
+ registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
+func Init() bool {
+ initializer, exists := registeredInitializers[os.Args[0]]
+ if exists {
+ initializer()
+
+ return true
+ }
+ return false
+}
+
+func naiveSelf() string {
+ name := os.Args[0]
+ if filepath.Base(name) == name {
+ if lp, err := exec.LookPath(name); err == nil {
+ return lp
+ }
+ }
+ // handle conversion of relative paths to absolute
+ if absName, err := filepath.Abs(name); err == nil {
+ return absName
+ }
+ // if we couldn't get absolute name, return original
+ // (NOTE: Go only errors on Abs() if os.Getwd fails)
+ return name
+}
diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar.go b/vendor/github.com/docker/docker/pkg/registrar/registrar.go
new file mode 100644
index 00000000000..1e75ee995b4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/registrar/registrar.go
@@ -0,0 +1,127 @@
+// Package registrar provides name registration. It reserves a name to a given key.
+package registrar
+
+import (
+ "errors"
+ "sync"
+)
+
+var (
+	// ErrNameReserved is returned when attempting to reserve a name that is already reserved
+ ErrNameReserved = errors.New("name is reserved")
+	// ErrNameNotReserved is returned when trying to look up a name that is not reserved
+ ErrNameNotReserved = errors.New("name is not reserved")
+ // ErrNoSuchKey is returned when trying to find the names for a key which is not known
+ ErrNoSuchKey = errors.New("provided key does not exist")
+)
+
+// Registrar indexes a list of keys and their registered names, as well as
+// names and the key that they are registered to.
+// Names must be unique.
+// Registrar is safe for concurrent access.
+type Registrar struct {
+ idx map[string][]string
+ names map[string]string
+ mu sync.Mutex
+}
+
+// NewRegistrar creates a new Registrar with an empty index
+func NewRegistrar() *Registrar {
+ return &Registrar{
+ idx: make(map[string][]string),
+ names: make(map[string]string),
+ }
+}
+
+// Reserve registers a key to a name.
+// Reserve is idempotent.
+// Attempting to reserve a name that is already reserved to a different key
+// results in ErrNameReserved.
+// A name reservation is globally unique.
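+//
+// A usage sketch (the key "container-id" is a placeholder):
+//
+//	r := NewRegistrar()
+//	if err := r.Reserve("web", "container-id"); err == ErrNameReserved {
+//		// "web" is already reserved to a different key
+//	}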
+func (r *Registrar) Reserve(name, key string) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if k, exists := r.names[name]; exists {
+ if k != key {
+ return ErrNameReserved
+ }
+ return nil
+ }
+
+ r.idx[key] = append(r.idx[key], name)
+ r.names[name] = key
+ return nil
+}
+
+// Release releases the reserved name
+// Once released, a name can be reserved again
+func (r *Registrar) Release(name string) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ key, exists := r.names[name]
+ if !exists {
+ return
+ }
+
+ for i, n := range r.idx[key] {
+ if n != name {
+ continue
+ }
+ r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...)
+ break
+ }
+
+ delete(r.names, name)
+
+ if len(r.idx[key]) == 0 {
+ delete(r.idx, key)
+ }
+}
+
+// Delete removes all reservations for the passed in key.
+// All names reserved to this key are released.
+func (r *Registrar) Delete(key string) {
+ r.mu.Lock()
+ for _, name := range r.idx[key] {
+ delete(r.names, name)
+ }
+ delete(r.idx, key)
+ r.mu.Unlock()
+}
+
+// GetNames lists all the reserved names for the given key
+func (r *Registrar) GetNames(key string) ([]string, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ names, exists := r.idx[key]
+ if !exists {
+ return nil, ErrNoSuchKey
+ }
+ return names, nil
+}
+
+// Get returns the key that the passed in name is reserved to
+func (r *Registrar) Get(name string) (string, error) {
+ r.mu.Lock()
+ key, exists := r.names[name]
+ r.mu.Unlock()
+
+ if !exists {
+ return "", ErrNameNotReserved
+ }
+ return key, nil
+}
+
+// GetAll returns all registered names
+func (r *Registrar) GetAll() map[string][]string {
+ out := make(map[string][]string)
+
+ r.mu.Lock()
+ // copy index into out
+ for id, names := range r.idx {
+ out[id] = names
+ }
+ r.mu.Unlock()
+ return out
+}
diff --git a/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go
new file mode 100644
index 00000000000..0c1ef312aef
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/registrar/registrar_test.go
@@ -0,0 +1,119 @@
+package registrar
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestReserve(t *testing.T) {
+ r := NewRegistrar()
+
+ obj := "test1"
+ if err := r.Reserve("test", obj); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := r.Reserve("test", obj); err != nil {
+ t.Fatal(err)
+ }
+
+ obj2 := "test2"
+ err := r.Reserve("test", obj2)
+ if err == nil {
+ t.Fatalf("expected error when reserving an already reserved name to another object")
+ }
+ if err != ErrNameReserved {
+ t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name")
+ }
+}
+
+func TestRelease(t *testing.T) {
+ r := NewRegistrar()
+ obj := "testing"
+
+ if err := r.Reserve("test", obj); err != nil {
+ t.Fatal(err)
+ }
+ r.Release("test")
+ r.Release("test") // Ensure there is no panic here
+
+ if err := r.Reserve("test", obj); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGetNames(t *testing.T) {
+ r := NewRegistrar()
+ obj := "testing"
+ names := []string{"test1", "test2"}
+
+ for _, name := range names {
+ if err := r.Reserve(name, obj); err != nil {
+ t.Fatal(err)
+ }
+ }
+ r.Reserve("test3", "other")
+
+ names2, err := r.GetNames(obj)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(names, names2) {
+		t.Fatalf("Expected: %v, Got: %v", names, names2)
+ }
+}
+
+func TestDelete(t *testing.T) {
+ r := NewRegistrar()
+ obj := "testing"
+ names := []string{"test1", "test2"}
+ for _, name := range names {
+ if err := r.Reserve(name, obj); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ r.Reserve("test3", "other")
+ r.Delete(obj)
+
+ _, err := r.GetNames(obj)
+ if err == nil {
+ t.Fatal("expected error getting names for deleted key")
+ }
+
+ if err != ErrNoSuchKey {
+ t.Fatal("expected `ErrNoSuchKey`")
+ }
+}
+
+func TestGet(t *testing.T) {
+ r := NewRegistrar()
+ obj := "testing"
+ name := "test"
+
+ _, err := r.Get(name)
+ if err == nil {
+ t.Fatal("expected error when key does not exist")
+ }
+ if err != ErrNameNotReserved {
+ t.Fatal(err)
+ }
+
+ if err := r.Reserve(name, obj); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err = r.Get(name); err != nil {
+ t.Fatal(err)
+ }
+
+ r.Delete(obj)
+ _, err = r.Get(name)
+ if err == nil {
+ t.Fatal("expected error when key does not exist")
+ }
+ if err != ErrNameNotReserved {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/README.md b/vendor/github.com/docker/docker/pkg/signal/README.md
new file mode 100644
index 00000000000..2b237a5942e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with signals across various operating systems.
\ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go
new file mode 100644
index 00000000000..68bb77cf58a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal.go
@@ -0,0 +1,54 @@
+// Package signal provides helper functions for dealing with signals across
+// various operating systems.
+package signal
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+)
+
+// CatchAll catches all signals and relays them to the specified channel.
+func CatchAll(sigc chan os.Signal) {
+ handledSigs := []os.Signal{}
+ for _, s := range SignalMap {
+ handledSigs = append(handledSigs, s)
+ }
+ signal.Notify(sigc, handledSigs...)
+}
+
+// StopCatch stops catching the signals and closes the specified channel.
+func StopCatch(sigc chan os.Signal) {
+ signal.Stop(sigc)
+ close(sigc)
+}
+
+// ParseSignal translates a string to a valid syscall signal.
+// It returns an error if the signal map doesn't include the given signal.
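+// Both numeric and named forms are accepted, with or without the "SIG"
+// prefix, e.g.:
+//
+//	sig, err := ParseSignal("SIGTERM") // syscall.SIGTERM
+//	sig, err = ParseSignal("9")        // syscall.Signal(9), i.e. SIGKILL on Unix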
+func ParseSignal(rawSignal string) (syscall.Signal, error) {
+ s, err := strconv.Atoi(rawSignal)
+ if err == nil {
+ if s == 0 {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return syscall.Signal(s), nil
+ }
+ signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")]
+ if !ok {
+ return -1, fmt.Errorf("Invalid signal: %s", rawSignal)
+ }
+ return signal, nil
+}
+
+// ValidSignalForPlatform returns true if a signal is valid on the platform
+func ValidSignalForPlatform(sig syscall.Signal) bool {
+ for _, v := range SignalMap {
+ if v == sig {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
new file mode 100644
index 00000000000..946de87e940
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
@@ -0,0 +1,41 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Darwin signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 00000000000..6b9569bb754
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INFO": syscall.SIGINFO,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "THR": syscall.SIGTHR,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 00000000000..d418cbe9e36
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,80 @@
+package signal
+
+import (
+ "syscall"
+)
+
+const (
+ sigrtmin = 34
+ sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+ "BUS": syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CLD": syscall.SIGCLD,
+ "CONT": syscall.SIGCONT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "PIPE": syscall.SIGPIPE,
+ "POLL": syscall.SIGPOLL,
+ "PROF": syscall.SIGPROF,
+ "PWR": syscall.SIGPWR,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STKFLT": syscall.SIGSTKFLT,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "UNUSED": syscall.SIGUNUSED,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+ "RTMIN": sigrtmin,
+ "RTMIN+1": sigrtmin + 1,
+ "RTMIN+2": sigrtmin + 2,
+ "RTMIN+3": sigrtmin + 3,
+ "RTMIN+4": sigrtmin + 4,
+ "RTMIN+5": sigrtmin + 5,
+ "RTMIN+6": sigrtmin + 6,
+ "RTMIN+7": sigrtmin + 7,
+ "RTMIN+8": sigrtmin + 8,
+ "RTMIN+9": sigrtmin + 9,
+ "RTMIN+10": sigrtmin + 10,
+ "RTMIN+11": sigrtmin + 11,
+ "RTMIN+12": sigrtmin + 12,
+ "RTMIN+13": sigrtmin + 13,
+ "RTMIN+14": sigrtmin + 14,
+ "RTMIN+15": sigrtmin + 15,
+ "RTMAX-14": sigrtmax - 14,
+ "RTMAX-13": sigrtmax - 13,
+ "RTMAX-12": sigrtmax - 12,
+ "RTMAX-11": sigrtmax - 11,
+ "RTMAX-10": sigrtmax - 10,
+ "RTMAX-9": sigrtmax - 9,
+ "RTMAX-8": sigrtmax - 8,
+ "RTMAX-7": sigrtmax - 7,
+ "RTMAX-6": sigrtmax - 6,
+ "RTMAX-5": sigrtmax - 5,
+ "RTMAX-4": sigrtmax - 4,
+ "RTMAX-3": sigrtmax - 3,
+ "RTMAX-2": sigrtmax - 2,
+ "RTMAX-1": sigrtmax - 1,
+ "RTMAX": sigrtmax,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
new file mode 100644
index 00000000000..89576b9e3bf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_solaris.go
@@ -0,0 +1,42 @@
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is a map of Solaris signals.
+// SIGINFO and SIGTHR are not defined for Solaris.
+var SignalMap = map[string]syscall.Signal{
+ "ABRT": syscall.SIGABRT,
+ "ALRM": syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+ "CHLD": syscall.SIGCHLD,
+ "CONT": syscall.SIGCONT,
+ "EMT": syscall.SIGEMT,
+ "FPE": syscall.SIGFPE,
+ "HUP": syscall.SIGHUP,
+ "ILL": syscall.SIGILL,
+ "INT": syscall.SIGINT,
+ "IO": syscall.SIGIO,
+ "IOT": syscall.SIGIOT,
+ "KILL": syscall.SIGKILL,
+ "LWP": syscall.SIGLWP,
+ "PIPE": syscall.SIGPIPE,
+ "PROF": syscall.SIGPROF,
+ "QUIT": syscall.SIGQUIT,
+ "SEGV": syscall.SIGSEGV,
+ "STOP": syscall.SIGSTOP,
+ "SYS": syscall.SIGSYS,
+ "TERM": syscall.SIGTERM,
+ "TRAP": syscall.SIGTRAP,
+ "TSTP": syscall.SIGTSTP,
+ "TTIN": syscall.SIGTTIN,
+ "TTOU": syscall.SIGTTOU,
+ "URG": syscall.SIGURG,
+ "USR1": syscall.SIGUSR1,
+ "USR2": syscall.SIGUSR2,
+ "VTALRM": syscall.SIGVTALRM,
+ "WINCH": syscall.SIGWINCH,
+ "XCPU": syscall.SIGXCPU,
+ "XFSZ": syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
new file mode 100644
index 00000000000..6621d37184b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in api/client (Windows has no equivalent, so the Windows build
+// maps them to invalid signal values to keep them from being handled)
+
+const (
+ // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted.
+ SIGCHLD = syscall.SIGCHLD
+ // SIGWINCH is a signal sent to a process when its controlling terminal changes its size
+ SIGWINCH = syscall.SIGWINCH
+ // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading
+ SIGPIPE = syscall.SIGPIPE
+	// DefaultStopSignal is the syscall signal used to stop a container on unix systems.
+ DefaultStopSignal = "SIGTERM"
+)
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
new file mode 100644
index 00000000000..c592d37dfeb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!darwin,!freebsd,!windows,!solaris
+
+package signal
+
+import (
+ "syscall"
+)
+
+// SignalMap is an empty map of signals for unsupported platform.
+var SignalMap = map[string]syscall.Signal{}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
new file mode 100644
index 00000000000..698cbf2dc8e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+
+package signal
+
+import (
+ "syscall"
+)
+
+// Signals used in api/client (no windows equivalent, use
+// invalid signals so they don't get handled)
+const (
+ SIGCHLD = syscall.Signal(0xff)
+ SIGWINCH = syscall.Signal(0xff)
+ SIGPIPE = syscall.Signal(0xff)
+	// DefaultStopSignal is the syscall signal used to stop a container on Windows systems.
+ DefaultStopSignal = "15"
+)
+
+// SignalMap is a map of "supported" signals. As per the comment in Go's
+// ztypes_windows.go: "More invented values for signals". Windows doesn't
+// really support signals in any way, shape or form that Unix does.
+//
+// We have these so that docker kill can be used to gracefully (TERM) and
+// forcibly (KILL) terminate a container on Windows.
+var SignalMap = map[string]syscall.Signal{
+ "KILL": syscall.SIGKILL,
+ "TERM": syscall.SIGTERM,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go
new file mode 100644
index 00000000000..d35ef0e8622
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/trap.go
@@ -0,0 +1,81 @@
+package signal
+
+import (
+ "os"
+ gosignal "os/signal"
+ "runtime"
+ "sync/atomic"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Trap sets up a simplified signal "trap", appropriate for common
+// behavior expected from a vanilla unix command-line tool in general
+// (and the Docker engine in particular).
+//
+// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+// skipped and the process is terminated immediately (allows force quit of stuck daemon)
+// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
+// * SIGPIPE events are ignored. systemd generates these when journald is restarted while
+//   the docker daemon, which runs under systemd, is itself not restarted.
+// Fixes https://github.com/docker/docker/issues/19728
+//
+func Trap(cleanup func()) {
+ c := make(chan os.Signal, 1)
+ // we will handle INT, TERM, QUIT, SIGPIPE here
+ signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE}
+ gosignal.Notify(c, signals...)
+ go func() {
+ interruptCount := uint32(0)
+ for sig := range c {
+ if sig == syscall.SIGPIPE {
+ continue
+ }
+
+ go func(sig os.Signal) {
+ logrus.Infof("Processing signal '%v'", sig)
+ switch sig {
+ case os.Interrupt, syscall.SIGTERM:
+ if atomic.LoadUint32(&interruptCount) < 3 {
+ // Initiate the cleanup only once
+ if atomic.AddUint32(&interruptCount, 1) == 1 {
+ // Call the provided cleanup handler
+ cleanup()
+ os.Exit(0)
+ } else {
+ return
+ }
+ } else {
+ // 3 SIGTERM/INT signals received; force exit without cleanup
+ logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
+ }
+ case syscall.SIGQUIT:
+ DumpStacks()
+ logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
+ }
+ // for the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal #
+ os.Exit(128 + int(sig.(syscall.Signal)))
+ }(sig)
+ }
+ }()
+}
+
+// DumpStacks dumps the runtime stacks of all goroutines.
+func DumpStacks() {
+ var (
+ buf []byte
+ stackSize int
+ )
+ bufferLen := 16384
+ for stackSize == len(buf) {
+ buf = make([]byte, bufferLen)
+ stackSize = runtime.Stack(buf, true)
+ bufferLen *= 2
+ }
+ buf = buf[:stackSize]
+ // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine
+ // traces won't show up in the log.
+ logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf)
+}
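
For context, Trap is meant to be called once from a daemon's main goroutine; a minimal sketch of the intended usage (the listener below is a hypothetical stand-in for real daemon state):

package main

import (
	"net"
	"os"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	// Hypothetical resource that must be released on shutdown.
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		os.Exit(1)
	}
	// The first SIGINT/SIGTERM runs the closure and exits 0; a third interrupt
	// before cleanup finishes force-exits with 128 + signal number instead.
	signal.Trap(func() {
		l.Close()
	})
	select {} // stand-in for the daemon's real event loop
}
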
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
similarity index 55%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go
rename to vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
index 63b3df79fce..8f67ece9492 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
@@ -1,49 +1,69 @@
package stdcopy
import (
+ "bytes"
"encoding/binary"
"errors"
+ "fmt"
"io"
+ "sync"
- "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
+ "github.com/Sirupsen/logrus"
)
+// StdType is the type of standard stream
+// a writer can multiplex to.
+type StdType byte
+
const (
- StdWriterPrefixLen = 8
- StdWriterFdIndex = 0
- StdWriterSizeIndex = 4
+ // Stdin represents standard input stream type.
+ Stdin StdType = iota
+ // Stdout represents standard output stream type.
+ Stdout
+ // Stderr represents standard error stream type.
+ Stderr
+
+ stdWriterPrefixLen = 8
+ stdWriterFdIndex = 0
+ stdWriterSizeIndex = 4
+
+ startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
-type StdType [StdWriterPrefixLen]byte
-
-var (
- Stdin StdType = StdType{0: 0}
- Stdout StdType = StdType{0: 1}
- Stderr StdType = StdType{0: 2}
-)
+var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
-type StdWriter struct {
+// stdWriter is a wrapper of io.Writer with extra customized info.
+type stdWriter struct {
io.Writer
- prefix StdType
- sizeBuf []byte
+ prefix byte
}
-func (w *StdWriter) Write(buf []byte) (n int, err error) {
- var n1, n2 int
+// Write sends the buffer to the underlying writer.
+// It inserts the prefix header before the buffer,
+// so stdcopy.StdCopy knows which stream to demultiplex the output to.
+// It makes stdWriter implement io.Writer.
+func (w *stdWriter) Write(p []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
- binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
- n1, err = w.Writer.Write(w.prefix[:])
- if err != nil {
- n = n1 - StdWriterPrefixLen
- } else {
- n2, err = w.Writer.Write(buf)
- n = n1 + n2 - StdWriterPrefixLen
+ if p == nil {
+ return 0, nil
}
+
+ header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
+ binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Write(header[:])
+ buf.Write(p)
+
+ n, err = w.Writer.Write(buf.Bytes())
+ n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
+
+ buf.Reset()
+ bufPool.Put(buf)
return
}
@@ -53,16 +73,13 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) {
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
-func NewStdWriter(w io.Writer, t StdType) *StdWriter {
- return &StdWriter{
- Writer: w,
- prefix: t,
- sizeBuf: make([]byte, 4),
+func NewStdWriter(w io.Writer, t StdType) io.Writer {
+ return &stdWriter{
+ Writer: w,
+ prefix: byte(t),
}
}
-var ErrInvalidStdHeader = errors.New("Unrecognized input header")
-
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
@@ -75,7 +92,7 @@ var ErrInvalidStdHeader = errors.New("Unrecognized input header")
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
- buf = make([]byte, 32*1024+StdWriterPrefixLen+1)
+ buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
@@ -85,12 +102,12 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
for {
// Make sure we have at least a full header
- for nr < StdWriterPrefixLen {
+ for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
- if nr < StdWriterPrefixLen {
+ if nr < stdWriterPrefixLen {
logrus.Debugf("Corrupted prefix: %v", buf[:nr])
return written, nil
}
@@ -103,40 +120,40 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Check the first byte to know where to write
- switch buf[StdWriterFdIndex] {
- case 0:
+ switch StdType(buf[stdWriterFdIndex]) {
+ case Stdin:
fallthrough
- case 1:
+ case Stdout:
// Write on stdout
out = dstout
- case 2:
+ case Stderr:
// Write on stderr
out = dsterr
default:
- logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex])
- return 0, ErrInvalidStdHeader
+ logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
+ return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame
- frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4]))
+ frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
logrus.Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
- if frameSize+StdWriterPrefixLen > bufLen {
- logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf))
- buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...)
+ if frameSize+stdWriterPrefixLen > bufLen {
+ logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
+ buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
- for nr < frameSize+StdWriterPrefixLen {
+ for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
- if nr < frameSize+StdWriterPrefixLen {
- logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr])
+ if nr < frameSize+stdWriterPrefixLen {
+ logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
return written, nil
}
break
@@ -148,7 +165,7 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Write the retrieved frame (without header)
- nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen])
+ nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
logrus.Debugf("Error writing frame: %s", ew)
return 0, ew
@@ -161,8 +178,8 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
written += int64(nw)
// Move the rest of the buffer to the beginning
- copy(buf, buf[frameSize+StdWriterPrefixLen:])
+ copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
- nr -= frameSize + StdWriterPrefixLen
+ nr -= frameSize + stdWriterPrefixLen
}
}
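
To see the writer and StdCopy working together, here is a small round-trip sketch based on the code above: two logical streams are multiplexed into one buffer via the 8-byte frame headers, then demultiplexed back onto the process's real stdout and stderr:

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var muxed bytes.Buffer
	// Each Write emits a header (stream id + big-endian payload length)
	// followed by the payload.
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	// StdCopy reads the headers and routes each frame to the right writer.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
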
diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
new file mode 100644
index 00000000000..3137a752398
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go
@@ -0,0 +1,260 @@
+package stdcopy
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+)
+
+func TestNewStdWriter(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ if writer == nil {
+ t.Fatalf("NewStdWriter with an invalid StdType should not return nil.")
+ }
+}
+
+func TestWriteWithUninitializedStdWriter(t *testing.T) {
+ writer := stdWriter{
+ Writer: nil,
+ prefix: byte(Stdout),
+ }
+ n, err := writer.Write([]byte("Something here"))
+ if n != 0 || err == nil {
+ t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter")
+ }
+}
+
+func TestWriteWithNilBytes(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ n, err := writer.Write(nil)
+ if err != nil {
+ t.Fatalf("Shouldn't have fail when given no data")
+ }
+ if n > 0 {
+ t.Fatalf("Write should have written 0 byte, but has written %d", n)
+ }
+}
+
+func TestWrite(t *testing.T) {
+ writer := NewStdWriter(ioutil.Discard, Stdout)
+ data := []byte("Test StdWrite.Write")
+ n, err := writer.Write(data)
+ if err != nil {
+ t.Fatalf("Error while writing with StdWrite")
+ }
+ if n != len(data) {
+ t.Fatalf("Write should have written %d byte but wrote %d.", len(data), n)
+ }
+}
+
+type errWriter struct {
+ n int
+ err error
+}
+
+func (f *errWriter) Write(buf []byte) (int, error) {
+ return f.n, f.err
+}
+
+func TestWriteWithWriterError(t *testing.T) {
+ expectedError := errors.New("expected")
+ expectedReturnedBytes := 10
+ writer := NewStdWriter(&errWriter{
+ n: stdWriterPrefixLen + expectedReturnedBytes,
+ err: expectedError}, Stdout)
+ data := []byte("This won't get written, sigh")
+ n, err := writer.Write(data)
+ if err != expectedError {
+ t.Fatalf("Didn't get expected error.")
+ }
+ if n != expectedReturnedBytes {
+ t.Fatalf("Didn't get expected written bytes %d, got %d.",
+ expectedReturnedBytes, n)
+ }
+}
+
+func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) {
+ writer := NewStdWriter(&errWriter{n: -1}, Stdout)
+ data := []byte("This won't get written, sigh")
+ actual, _ := writer.Write(data)
+ if actual != 0 {
+ t.Fatalf("Expected returned written bytes equal to 0, got %d", actual)
+ }
+}
+
+func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) {
+ buffer = new(bytes.Buffer)
+ dstOut := NewStdWriter(buffer, Stdout)
+ _, err = dstOut.Write(stdOutBytes)
+ if err != nil {
+ return
+ }
+ dstErr := NewStdWriter(buffer, Stderr)
+ _, err = dstErr.Write(stdErrBytes)
+ return
+}
+
+func TestStdCopyWriteAndRead(t *testing.T) {
+ stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+ stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+ buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes)
+ if written != int64(expectedTotalWritten) {
+ t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written)
+ }
+}
+
+type customReader struct {
+ n int
+ err error
+ totalCalls int
+ correctCalls int
+ src *bytes.Buffer
+}
+
+func (f *customReader) Read(buf []byte) (int, error) {
+ f.totalCalls++
+ if f.totalCalls <= f.correctCalls {
+ return f.src.Read(buf)
+ }
+ return f.n, f.err
+}
+
+func TestStdCopyReturnsErrorReadingHeader(t *testing.T) {
+ expectedError := errors.New("error")
+ reader := &customReader{
+ err: expectedError}
+ written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
+ if written != 0 {
+ t.Fatalf("Expected 0 bytes read, got %d", written)
+ }
+ if err != expectedError {
+ t.Fatalf("Didn't get expected error")
+ }
+}
+
+func TestStdCopyReturnsErrorReadingFrame(t *testing.T) {
+ expectedError := errors.New("error")
+ stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+ stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+ buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ reader := &customReader{
+ correctCalls: 1,
+ n: stdWriterPrefixLen + 1,
+ err: expectedError,
+ src: buffer}
+ written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
+ if written != 0 {
+ t.Fatalf("Expected 0 bytes read, got %d", written)
+ }
+ if err != expectedError {
+ t.Fatalf("Didn't get expected error")
+ }
+}
+
+func TestStdCopyDetectsCorruptedFrame(t *testing.T) {
+ stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+ stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+ buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ reader := &customReader{
+ correctCalls: 1,
+ n: stdWriterPrefixLen + 1,
+ err: io.EOF,
+ src: buffer}
+ written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader)
+ if written != startingBufLen {
+ t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written)
+ }
+ if err != nil {
+ t.Fatal("Didn't get nil error")
+ }
+}
+
+func TestStdCopyWithInvalidInputHeader(t *testing.T) {
+ dstOut := NewStdWriter(ioutil.Discard, Stdout)
+ dstErr := NewStdWriter(ioutil.Discard, Stderr)
+ src := strings.NewReader("Invalid input")
+ _, err := StdCopy(dstOut, dstErr, src)
+ if err == nil {
+ t.Fatal("StdCopy with invalid input header should fail.")
+ }
+}
+
+func TestStdCopyWithCorruptedPrefix(t *testing.T) {
+ data := []byte{0x01, 0x02, 0x03}
+ src := bytes.NewReader(data)
+ written, err := StdCopy(nil, nil, src)
+ if err != nil {
+ t.Fatalf("StdCopy should not return an error with corrupted prefix.")
+ }
+ if written != 0 {
+ t.Fatalf("StdCopy should have written 0, but has written %d", written)
+ }
+}
+
+func TestStdCopyReturnsWriteErrors(t *testing.T) {
+ stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+ stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+ buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expectedError := errors.New("expected")
+
+ dstOut := &errWriter{err: expectedError}
+
+ written, err := StdCopy(dstOut, ioutil.Discard, buffer)
+ if written != 0 {
+ t.Fatalf("StdCopy should have written 0, but has written %d", written)
+ }
+ if err != expectedError {
+ t.Fatalf("Didn't get expected error, got %v", err)
+ }
+}
+
+func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) {
+ stdOutBytes := []byte(strings.Repeat("o", startingBufLen))
+ stdErrBytes := []byte(strings.Repeat("e", startingBufLen))
+ buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ dstOut := &errWriter{n: startingBufLen - 10}
+
+ written, err := StdCopy(dstOut, ioutil.Discard, buffer)
+ if written != 0 {
+ t.Fatalf("StdCopy should have return 0 written bytes, but returned %d", written)
+ }
+ if err != io.ErrShortWrite {
+ t.Fatalf("Didn't get expected io.ErrShortWrite error")
+ }
+}
+
+func BenchmarkWrite(b *testing.B) {
+ w := NewStdWriter(ioutil.Discard, Stdout)
+ data := []byte("Test line for testing stdwriter performance\n")
+ data = bytes.Repeat(data, 100)
+ b.SetBytes(int64(len(data)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := w.Write(data); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
new file mode 100644
index 00000000000..ce6ea79deea
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
@@ -0,0 +1,172 @@
+// Package streamformatter provides helper functions to format a stream.
+package streamformatter
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/docker/docker/pkg/jsonmessage"
+ "github.com/docker/docker/pkg/progress"
+)
+
+// StreamFormatter formats a stream, optionally using JSON.
+type StreamFormatter struct {
+ json bool
+}
+
+// NewStreamFormatter returns a simple StreamFormatter
+func NewStreamFormatter() *StreamFormatter {
+ return &StreamFormatter{}
+}
+
+// NewJSONStreamFormatter returns a StreamFormatter configured to stream json
+func NewJSONStreamFormatter() *StreamFormatter {
+ return &StreamFormatter{true}
+}
+
+const streamNewline = "\r\n"
+
+var streamNewlineBytes = []byte(streamNewline)
+
+// FormatStream formats the specified stream.
+func (sf *StreamFormatter) FormatStream(str string) []byte {
+ if sf.json {
+ b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str})
+ if err != nil {
+ return sf.FormatError(err)
+ }
+ return append(b, streamNewlineBytes...)
+ }
+ return []byte(str + "\r")
+}
+
+// FormatStatus formats the specified objects according to the specified format (and id).
+func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte {
+ str := fmt.Sprintf(format, a...)
+ if sf.json {
+ b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str})
+ if err != nil {
+ return sf.FormatError(err)
+ }
+ return append(b, streamNewlineBytes...)
+ }
+ return []byte(str + streamNewline)
+}
+
+// FormatError formats the specified error.
+func (sf *StreamFormatter) FormatError(err error) []byte {
+ if sf.json {
+ jsonError, ok := err.(*jsonmessage.JSONError)
+ if !ok {
+ jsonError = &jsonmessage.JSONError{Message: err.Error()}
+ }
+ if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil {
+ return append(b, streamNewlineBytes...)
+ }
+ return []byte("{\"error\":\"format error\"}" + streamNewline)
+ }
+ return []byte("Error: " + err.Error() + streamNewline)
+}
+
+// FormatProgress formats the progress information for a specified action.
+func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+ if progress == nil {
+ progress = &jsonmessage.JSONProgress{}
+ }
+ if sf.json {
+ var auxJSON *json.RawMessage
+ if aux != nil {
+ auxJSONBytes, err := json.Marshal(aux)
+ if err != nil {
+ return nil
+ }
+ auxJSON = new(json.RawMessage)
+ *auxJSON = auxJSONBytes
+ }
+ b, err := json.Marshal(&jsonmessage.JSONMessage{
+ Status: action,
+ ProgressMessage: progress.String(),
+ Progress: progress,
+ ID: id,
+ Aux: auxJSON,
+ })
+ if err != nil {
+ return nil
+ }
+ return append(b, streamNewlineBytes...)
+ }
+ endl := "\r"
+ if progress.String() == "" {
+ endl += "\n"
+ }
+ return []byte(action + " " + progress.String() + endl)
+}
+
+// NewProgressOutput returns a progress.Output object that can be passed to
+// progress.NewProgressReader.
+func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output {
+ return &progressOutput{
+ sf: sf,
+ out: out,
+ newLines: newLines,
+ }
+}
+
+type progressOutput struct {
+ sf *StreamFormatter
+ out io.Writer
+ newLines bool
+}
+
+// WriteProgress formats progress information from a ProgressReader.
+func (out *progressOutput) WriteProgress(prog progress.Progress) error {
+ var formatted []byte
+ if prog.Message != "" {
+ formatted = out.sf.FormatStatus(prog.ID, prog.Message)
+ } else {
+ jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total}
+ formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux)
+ }
+ _, err := out.out.Write(formatted)
+ if err != nil {
+ return err
+ }
+
+ if out.newLines && prog.LastUpdate {
+ _, err = out.out.Write(out.sf.FormatStatus("", ""))
+ return err
+ }
+
+ return nil
+}
+
+// StdoutFormatter is a StreamFormatter that writes to the standard output.
+type StdoutFormatter struct {
+ io.Writer
+ *StreamFormatter
+}
+
+func (sf *StdoutFormatter) Write(buf []byte) (int, error) {
+ formattedBuf := sf.StreamFormatter.FormatStream(string(buf))
+ n, err := sf.Writer.Write(formattedBuf)
+ if n != len(formattedBuf) {
+ return n, io.ErrShortWrite
+ }
+ return len(buf), err
+}
+
+// StderrFormatter is a StreamFormatter that writes to the standard error.
+type StderrFormatter struct {
+ io.Writer
+ *StreamFormatter
+}
+
+func (sf *StderrFormatter) Write(buf []byte) (int, error) {
+ formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m")
+ n, err := sf.Writer.Write(formattedBuf)
+ if n != len(formattedBuf) {
+ return n, io.ErrShortWrite
+ }
+ return len(buf), err
+}
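
A quick sketch of the two formatter modes, based only on the behavior above (the id and status text are made-up values; the JSON shape follows the JSONMessage encoding):

package main

import (
	"os"

	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	// JSON mode: emits {"status":"Pulling layer","id":"abc123"}\r\n
	sf := streamformatter.NewJSONStreamFormatter()
	os.Stdout.Write(sf.FormatStatus("abc123", "Pulling %s", "layer"))

	// Plain mode: emits "done\r\n"
	plain := streamformatter.NewStreamFormatter()
	os.Stdout.Write(plain.FormatStatus("", "done"))
}
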
diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go
new file mode 100644
index 00000000000..93ec90f5f7d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter_test.go
@@ -0,0 +1,108 @@
+package streamformatter
+
+import (
+ "encoding/json"
+ "errors"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/docker/docker/pkg/jsonmessage"
+)
+
+func TestFormatStream(t *testing.T) {
+ sf := NewStreamFormatter()
+ res := sf.FormatStream("stream")
+ if string(res) != "stream"+"\r" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestFormatJSONStatus(t *testing.T) {
+ sf := NewStreamFormatter()
+ res := sf.FormatStatus("ID", "%s%d", "a", 1)
+ if string(res) != "a1\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestFormatSimpleError(t *testing.T) {
+ sf := NewStreamFormatter()
+ res := sf.FormatError(errors.New("Error for formatter"))
+ if string(res) != "Error: Error for formatter\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatStream(t *testing.T) {
+ sf := NewJSONStreamFormatter()
+ res := sf.FormatStream("stream")
+ if string(res) != `{"stream":"stream"}`+"\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatStatus(t *testing.T) {
+ sf := NewJSONStreamFormatter()
+ res := sf.FormatStatus("ID", "%s%d", "a", 1)
+ if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatSimpleError(t *testing.T) {
+ sf := NewJSONStreamFormatter()
+ res := sf.FormatError(errors.New("Error for formatter"))
+ if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatJSONError(t *testing.T) {
+ sf := NewJSONStreamFormatter()
+ err := &jsonmessage.JSONError{Code: 50, Message: "Json error"}
+ res := sf.FormatError(err)
+ if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" {
+ t.Fatalf("%q", res)
+ }
+}
+
+func TestJSONFormatProgress(t *testing.T) {
+ sf := NewJSONStreamFormatter()
+ progress := &jsonmessage.JSONProgress{
+ Current: 15,
+ Total: 30,
+ Start: 1,
+ }
+ res := sf.FormatProgress("id", "action", progress, nil)
+ msg := &jsonmessage.JSONMessage{}
+ if err := json.Unmarshal(res, msg); err != nil {
+ t.Fatal(err)
+ }
+ if msg.ID != "id" {
+ t.Fatalf("ID must be 'id', got: %s", msg.ID)
+ }
+ if msg.Status != "action" {
+ t.Fatalf("Status must be 'action', got: %s", msg.Status)
+ }
+
+ // The progress will always be in the format of:
+ // [=========================> ] 15 B/30 B 404933h7m11s
+ // The last entry '404933h7m11s' is the timeLeftBox.
+ // However, the timeLeftBox field may change as progress.String() depends on time.Now().
+ // Therefore, we have to strip the timeLeftBox from the strings to do the comparison.
+
+ // Compare the progress strings before the timeLeftBox
+ expectedProgress := "[=========================> ] 15 B/30 B"
+ // if terminal column is <= 110, expectedProgressShort is expected.
+ expectedProgressShort := " 15 B/30 B"
+ if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) ||
+ strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) {
+ t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s",
+ expectedProgress, expectedProgressShort, msg.ProgressMessage)
+ }
+
+ if !reflect.DeepEqual(msg.Progress, progress) {
+ t.Fatal("Original progress not equals progress from FormatProgress")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 00000000000..37a5098fd98
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 00000000000..161184ff8a4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,71 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/docker/docker/pkg/random"
+)
+
+const shortLen = 12
+
+var validShortID = regexp.MustCompile("^[a-z0-9]{12}$")
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+ return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+ if i := strings.IndexRune(id, ':'); i >= 0 {
+ id = id[i+1:]
+ }
+ trimTo := shortLen
+ if len(id) < shortLen {
+ trimTo = len(id)
+ }
+ return id[:trimTo]
+}
+
+func generateID(crypto bool) string {
+ b := make([]byte, 32)
+ r := random.Reader
+ if crypto {
+ r = rand.Reader
+ }
+ for {
+ if _, err := io.ReadFull(r, b); err != nil {
+ panic(err) // This shouldn't happen
+ }
+ id := hex.EncodeToString(b)
+ // if we try to parse the truncated form as an int and we don't have
+ // an error then the value is all numeric and causes issues when
+ // used as a hostname. ref #3869
+ if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+ continue
+ }
+ return id
+ }
+}
+
+// GenerateRandomID returns a unique id.
+func GenerateRandomID() string {
+ return generateID(true)
+}
+
+// GenerateNonCryptoID generates a unique id without using cryptographically
+// secure sources of randomness.
+// It helps conserve entropy.
+func GenerateNonCryptoID() string {
+ return generateID(false)
+}
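
Putting the helpers together, a short usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	id := stringid.GenerateRandomID() // 64 hex characters
	short := stringid.TruncateID(id)  // first 12 characters
	// Hex short ids match the [a-z0-9]{12} pattern, so this prints "true".
	fmt.Println(short, stringid.IsShortID(short))
}
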
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go
new file mode 100644
index 00000000000..bcb13654955
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid_test.go
@@ -0,0 +1,56 @@
+package stringid
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestGenerateRandomID(t *testing.T) {
+ id := GenerateRandomID()
+
+ if len(id) != 64 {
+ t.Fatalf("Id returned is incorrect: %s", id)
+ }
+}
+
+func TestShortenId(t *testing.T) {
+ id := GenerateRandomID()
+ truncID := TruncateID(id)
+ if len(truncID) != 12 {
+ t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+ }
+}
+
+func TestShortenIdEmpty(t *testing.T) {
+ id := ""
+ truncID := TruncateID(id)
+ if len(truncID) > len(id) {
+ t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+ }
+}
+
+func TestShortenIdInvalid(t *testing.T) {
+ id := "1234"
+ truncID := TruncateID(id)
+ if len(truncID) != len(id) {
+ t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID)
+ }
+}
+
+func TestIsShortIDNonHex(t *testing.T) {
+ id := "some non-hex value"
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+}
+
+func TestIsShortIDNotCorrectSize(t *testing.T) {
+ id := strings.Repeat("a", shortLen+1)
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+ id = strings.Repeat("a", shortLen-1)
+ if IsShortID(id) {
+ t.Fatalf("%s is not a short ID", id)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/README.md b/vendor/github.com/docker/docker/pkg/stringutils/README.md
new file mode 100644
index 00000000000..b3e454573c3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with strings
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
new file mode 100644
index 00000000000..7c00b972ddf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils.go
@@ -0,0 +1,87 @@
+// Package stringutils provides helper functions for dealing with strings.
+package stringutils
+
+import (
+ "bytes"
+ "math/rand"
+ "strings"
+
+ "github.com/docker/docker/pkg/random"
+)
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+ // letters to pick from
+ letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letters[random.Rand.Intn(len(letters))]
+ }
+ return string(b)
+}
+
+// GenerateRandomASCIIString generates an ASCII random string with length n.
+func GenerateRandomASCIIString(n int) string {
+ chars := "abcdefghijklmnopqrstuvwxyz" +
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
+ "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
+ res := make([]byte, n)
+ for i := 0; i < n; i++ {
+ res[i] = chars[rand.Intn(len(chars))]
+ }
+ return string(res)
+}
+
+// Truncate truncates a string to maxlen.
+func Truncate(s string, maxlen int) string {
+ if len(s) <= maxlen {
+ return s
+ }
+ return s[:maxlen]
+}
+
+// InSlice tests whether a string is contained in a slice of strings or not.
+// Comparison is case-insensitive.
+func InSlice(slice []string, s string) bool {
+ for _, ss := range slice {
+ if strings.ToLower(s) == strings.ToLower(ss) {
+ return true
+ }
+ }
+ return false
+}
+
+func quote(word string, buf *bytes.Buffer) {
+ // Bail out early for "simple" strings
+ if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
+ buf.WriteString(word)
+ return
+ }
+
+ buf.WriteString("'")
+
+ for i := 0; i < len(word); i++ {
+ b := word[i]
+ if b == '\'' {
+ // Replace literal ' with a close ', a \', and an open '
+ buf.WriteString("'\\''")
+ } else {
+ buf.WriteByte(b)
+ }
+ }
+
+ buf.WriteString("'")
+}
+
+// ShellQuoteArguments takes a list of strings and escapes them so they will be
+// handled correctly when passed as arguments to a program via a shell
+func ShellQuoteArguments(args []string) string {
+ var buf bytes.Buffer
+ for i, arg := range args {
+ if i != 0 {
+ buf.WriteByte(' ')
+ }
+ quote(arg, &buf)
+ }
+ return buf.String()
+}
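
A brief usage sketch of the quoting and truncation helpers (the argument values are arbitrary examples):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringutils"
)

func main() {
	args := []string{"echo", "it's a test", "$HOME"}
	// Prints: echo 'it'\''s a test' '$HOME'
	fmt.Println(stringutils.ShellQuoteArguments(args))

	// Prints: test
	fmt.Println(stringutils.Truncate("teststring", 4))
}
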
diff --git a/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go
new file mode 100644
index 00000000000..fec59450bce
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringutils/stringutils_test.go
@@ -0,0 +1,105 @@
+package stringutils
+
+import "testing"
+
+func testLengthHelper(generator func(int) string, t *testing.T) {
+ expectedLength := 20
+ s := generator(expectedLength)
+ if len(s) != expectedLength {
+ t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength)
+ }
+}
+
+func testUniquenessHelper(generator func(int) string, t *testing.T) {
+ repeats := 25
+ set := make(map[string]struct{}, repeats)
+ for i := 0; i < repeats; i = i + 1 {
+ str := generator(64)
+ if len(str) != 64 {
+ t.Fatalf("Id returned is incorrect: %s", str)
+ }
+ if _, ok := set[str]; ok {
+ t.Fatalf("Random number is repeated")
+ }
+ set[str] = struct{}{}
+ }
+}
+
+func isASCII(s string) bool {
+ for _, c := range s {
+ if c > 127 {
+ return false
+ }
+ }
+ return true
+}
+
+func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
+ testLengthHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
+ testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAsciiStringLength(t *testing.T) {
+ testLengthHelper(GenerateRandomASCIIString, t)
+}
+
+func TestGenerateRandomAsciiStringUniqueness(t *testing.T) {
+ testUniquenessHelper(GenerateRandomASCIIString, t)
+}
+
+func TestGenerateRandomAsciiStringIsAscii(t *testing.T) {
+ str := GenerateRandomASCIIString(64)
+ if !isASCII(str) {
+ t.Fatalf("%s contained non-ascii characters", str)
+ }
+}
+
+func TestTruncate(t *testing.T) {
+ str := "teststring"
+ newstr := Truncate(str, 4)
+ if newstr != "test" {
+ t.Fatalf("Expected test, got %s", newstr)
+ }
+ newstr = Truncate(str, 20)
+ if newstr != "teststring" {
+ t.Fatalf("Expected teststring, got %s", newstr)
+ }
+}
+
+func TestInSlice(t *testing.T) {
+ slice := []string{"test", "in", "slice"}
+
+ test := InSlice(slice, "test")
+ if !test {
+ t.Fatalf("Expected string test to be in slice")
+ }
+ test = InSlice(slice, "SLICE")
+ if !test {
+ t.Fatalf("Expected string SLICE to be in slice")
+ }
+ test = InSlice(slice, "notinslice")
+ if test {
+ t.Fatalf("Expected string notinslice not to be in slice")
+ }
+}
+
+func TestShellQuoteArgumentsEmpty(t *testing.T) {
+ actual := ShellQuoteArguments([]string{})
+ expected := ""
+ if actual != expected {
+ t.Fatalf("Expected an empty string")
+ }
+}
+
+func TestShellQuoteArguments(t *testing.T) {
+ simpleString := "simpleString"
+ complexString := "This is a 'more' complex $tring with some special char *"
+ actual := ShellQuoteArguments([]string{simpleString, complexString})
+ expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'"
+ if actual != expected {
+ t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
new file mode 100644
index 00000000000..34c4ea7c505
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
new file mode 100644
index 00000000000..9b4f4a294ea
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD
@@ -0,0 +1,27 @@
+Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/README.md b/vendor/github.com/docker/docker/pkg/symlink/README.md
new file mode 100644
index 00000000000..8dba54fd089
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/README.md
@@ -0,0 +1,6 @@
+Package symlink implements EvalSymlinksInScope, which is an extension of filepath.EvalSymlinks,
+as well as a Windows long-path aware version of filepath.EvalSymlinks
+from the [Go standard library](https://golang.org/pkg/path/filepath).
+
+The code from filepath.EvalSymlinks has been adapted in fs.go.
+Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go.
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 00000000000..dcf707f426c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,143 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+ path, err := filepath.Abs(filepath.FromSlash(path))
+ if err != nil {
+ return "", err
+ }
+ root, err = filepath.Abs(filepath.FromSlash(root))
+ if err != nil {
+ return "", err
+ }
+ return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+// If /foo/bar -> /outside,
+// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one subsequently makes /foo/bar a symlink to /baz, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) {
+ root = filepath.Clean(root)
+ if path == root {
+ return path, nil
+ }
+ if !strings.HasPrefix(path, root) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ const maxIter = 255
+ originalPath := path
+ // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
+ path = path[len(root):]
+ if root == string(filepath.Separator) {
+ path = string(filepath.Separator) + path
+ }
+ if !strings.HasPrefix(path, string(filepath.Separator)) {
+ return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
+ }
+ path = filepath.Clean(path)
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ // b here will always be considered to be the "current absolute path inside
+ // root"; when we append paths to it, we also append a slash and use
+ // filepath.Clean after the loop to trim the trailing slash
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
+ }
+
+ // find next path component, p
+ i := strings.IndexRune(path, filepath.Separator)
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ continue
+ }
+
+ // this takes a b.String() like "b/../" and a p like "c" and turns it
+ // into "/b/../c" which then gets filepath.Cleaned into "/c" and then
+ // root gets prepended and we Clean again (to remove any trailing slash
+ // if the first Clean gave us just "/")
+ cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
+ if cleanP == string(filepath.Separator) {
+ // never Lstat "/" itself
+ b.Reset()
+ continue
+ }
+ fullP := filepath.Clean(root + cleanP)
+
+ fi, err := os.Lstat(fullP)
+ if os.IsNotExist(err) {
+ // if p does not exist, accept it
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p + string(filepath.Separator))
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(fullP)
+ if err != nil {
+ return "", err
+ }
+ if system.IsAbs(dest) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+
+ // see note above on "fullP := ..." for why this is double-cleaned and
+ // what's happening here
+ return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
+}
+
+// EvalSymlinks returns the path name after the evaluation of any symbolic
+// links.
+// If path is relative the result will be relative to the current directory,
+// unless one of the components is an absolute symbolic link.
+// This version has been updated to support long paths prepended with `\\?\`.
+func EvalSymlinks(path string) (string, error) {
+ return evalSymlinks(path)
+}
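
As a usage sketch (the paths here are hypothetical): if /var/lib/app/data/link is a symlink to /etc, the result is forced back under the scope rather than escaping it:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// With the hypothetical link above, this returns /var/lib/app/data/etc,
	// not /etc: the absolute target is re-rooted inside the scope.
	p, err := symlink.FollowSymlinkInScope("/var/lib/app/data/link", "/var/lib/app/data")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(p)
}
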
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
new file mode 100644
index 00000000000..818004f26c0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package symlink
+
+import (
+ "path/filepath"
+)
+
+func evalSymlinks(path string) (string, error) {
+ return filepath.EvalSymlinks(path)
+}
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go
new file mode 100644
index 00000000000..7085c0b666e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix_test.go
@@ -0,0 +1,407 @@
+// +build !windows
+
+// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE
+
+package symlink
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+// TODO Windows: This needs some serious work to port to Windows. For now,
+// turning off testing in this package.
+
+type dirOrLink struct {
+ path string
+ target string
+}
+
+func makeFs(tmpdir string, fs []dirOrLink) error {
+ for _, s := range fs {
+ s.path = filepath.Join(tmpdir, s.path)
+ if s.target == "" {
+ os.MkdirAll(s.path, 0755)
+ continue
+ }
+ if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil {
+ return err
+ }
+ if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func testSymlink(tmpdir, path, expected, scope string) error {
+ rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope))
+ if err != nil {
+ return err
+ }
+ expected, err = filepath.Abs(filepath.Join(tmpdir, expected))
+ if err != nil {
+ return err
+ }
+ if expected != rewrite {
+ return fmt.Errorf("Expected %q got %q", expected, rewrite)
+ }
+ return nil
+}
+
+func TestFollowSymlinkAbsolute(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativePath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "linkdir", target: "realdir"},
+ {path: "linkdir/foo/bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkInvalidScopePathPair(t *testing.T) {
+ if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil {
+ t.Fatal("expected an error")
+ }
+}
+
+func TestFollowSymlinkLastLink(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink e lead us to ../b
+ // normalize to "testdata/fs/a"
+ if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink f lead us out of the "testdata" scope
+ // we don't normalize because symlink f is in scope and there is no
+ // information leak
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+ // avoid letting symlink f lead us out of the "testdata/fs" scope
+ // we don't normalize because symlink f is in scope and there is no
+ // information leak
+ if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // avoid letting symlink g (pointed at by symlink h) take us out of scope
+ // TODO: we should probably normalize to scope here because ../[....]/root
+ // is out of scope and we leak information
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "testdata/fs/b/h", target: "../g"},
+ {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkBreakoutPath(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // avoid letting symlink -> ../directory/file escape from scope
+ // normalize to "testdata/fs/j"
+ if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkToRoot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // make sure we don't allow escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkSlashDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
+
+ // make sure we don't allow escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkDotdot(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+ tmpdir = filepath.Join(tmpdir, "dir", "subdir")
+
+ // make sure we stay in scope without leaking information
+ // this also checks for escaping to /
+ // normalize to dir
+ if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRelativePath2(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkScopeLink(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root2/foo", target: "../bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkRootScope(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ expected, err := filepath.EvalSymlinks(tmpdir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ rewrite, err := FollowSymlinkInScope(tmpdir, "/")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if rewrite != expected {
+ t.Fatalf("expected %q got %q", expected, rewrite)
+ }
+}
+
+func TestFollowSymlinkEmpty(t *testing.T) {
+ res, err := FollowSymlinkInScope("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ wd, err := os.Getwd()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res != wd {
+ t.Fatalf("expected %q got %q", wd, res)
+ }
+}
+
+func TestFollowSymlinkCircular(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+ t.Fatal("expected an error for foo -> foo")
+ }
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/bar", target: "baz"},
+ {path: "root/baz", target: "../bak"},
+ {path: "root/bak", target: "/bar"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
+ t.Fatal("expected an error for bar -> baz -> bak -> bar")
+ }
+}
+
+func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root2"},
+ {path: "root", target: "root2"},
+ {path: "root/a", target: "r/s"},
+ {path: "root/r", target: "../root/t"},
+ {path: "root/root/t/s/b", target: "/../u"},
+ {path: "root/u/c", target: "."},
+ {path: "root/u/x/y", target: "../v"},
+ {path: "root/u/v", target: "/../w"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkBreakoutNonExistent(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/slash", target: "/"},
+ {path: "root/sym", target: "/idontexist/../slash"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFollowSymlinkNoLexicalCleaning(t *testing.T) {
+ tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ if err := makeFs(tmpdir, []dirOrLink{
+ {path: "root/sym", target: "/foo/bar"},
+ {path: "root/hello", target: "/sym/../baz"},
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil {
+ t.Fatal(err)
+ }
+}
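
Taken together, these tests pin down the contract of `FollowSymlinkInScope(path, root)`: the returned path is fully resolved yet never escapes `root`, and circular chains fail with an error. A minimal usage sketch (the `/var/lib/scope` paths below are invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Resolve a path while refusing to let any symlink escape the scope.
	// Links that point outside are re-rooted under the scope instead of
	// leaking their real target.
	resolved, err := symlink.FollowSymlinkInScope("/var/lib/scope/a/b", "/var/lib/scope")
	if err != nil {
		log.Fatal(err) // e.g. a circular chain such as foo -> foo
	}
	fmt.Println(resolved)
}
```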
diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
new file mode 100644
index 00000000000..449fe564832
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
@@ -0,0 +1,155 @@
+package symlink
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/longpath"
+)
+
+func toShort(path string) (string, error) {
+ p, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetShortPathName says we can reuse buffer
+ n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
+ return "", err
+ }
+ }
+ return syscall.UTF16ToString(b), nil
+}
+
+func toLong(path string) (string, error) {
+ p, err := syscall.UTF16FromString(path)
+ if err != nil {
+ return "", err
+ }
+ b := p // GetLongPathName says we can reuse buffer
+ n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ if n > uint32(len(b)) {
+ b = make([]uint16, n)
+ n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
+ if err != nil {
+ return "", err
+ }
+ }
+ b = b[:n]
+ return syscall.UTF16ToString(b), nil
+}
+
+func evalSymlinks(path string) (string, error) {
+ path, err := walkSymlinks(path)
+ if err != nil {
+ return "", err
+ }
+
+ p, err := toShort(path)
+ if err != nil {
+ return "", err
+ }
+ p, err = toLong(p)
+ if err != nil {
+ return "", err
+ }
+ // syscall.GetLongPathName does not change the case of the drive letter,
+ // but the result of EvalSymlinks must be unique, so we have
+ // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
+ // Make drive letter upper case.
+ if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
+ p = string(p[0]+'A'-'a') + p[1:]
+ } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
+ p = p[:3] + string(p[4]+'A'-'a') + p[5:]
+ }
+ return filepath.Clean(p), nil
+}
+
+const utf8RuneSelf = 0x80
+
+func walkSymlinks(path string) (string, error) {
+ const maxIter = 255
+ originalPath := path
+ // consume path by taking each frontmost path element,
+ // expanding it if it's a symlink, and appending it to b
+ var b bytes.Buffer
+ for n := 0; path != ""; n++ {
+ if n > maxIter {
+ return "", errors.New("EvalSymlinks: too many links in " + originalPath)
+ }
+
+ // A path beginning with `\\?\` represents the root, so automatically
+ // skip that part and begin processing the next segment.
+ if strings.HasPrefix(path, longpath.Prefix) {
+ b.WriteString(longpath.Prefix)
+ path = path[4:]
+ continue
+ }
+
+ // find next path component, p
+ var i = -1
+ for j, c := range path {
+ if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
+ i = j
+ break
+ }
+ }
+ var p string
+ if i == -1 {
+ p, path = path, ""
+ } else {
+ p, path = path[:i], path[i+1:]
+ }
+
+ if p == "" {
+ if b.Len() == 0 {
+ // must be absolute path
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // If this is the first segment after the long path prefix, accept the
+ // current segment as a volume root or UNC share and move on to the next.
+ if b.String() == longpath.Prefix {
+ b.WriteString(p)
+ b.WriteRune(filepath.Separator)
+ continue
+ }
+
+ fi, err := os.Lstat(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ b.WriteString(p)
+ if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
+ b.WriteRune(filepath.Separator)
+ }
+ continue
+ }
+
+ // it's a symlink, put it at the front of path
+ dest, err := os.Readlink(b.String() + p)
+ if err != nil {
+ return "", err
+ }
+ if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
+ b.Reset()
+ }
+ path = dest + string(filepath.Separator) + path
+ }
+ return filepath.Clean(b.String()), nil
+}
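
The heart of `walkSymlinks` is the loop that consumes the path front to back. The runnable sketch below isolates just the component-splitting step under simplified assumptions (no symlink expansion, no `\\?\` prefix handling); `components` is a stand-in name, not part of the package:

```go
package main

import (
	"fmt"
	"strings"
)

// components takes the leading path element, shrinks the remainder, and
// repeats -- the same shape as walkSymlinks' main loop.
func components(path string) []string {
	var out []string
	for path != "" {
		i := strings.IndexAny(path, `/\`)
		var p string
		if i == -1 {
			p, path = path, "" // last component
		} else {
			p, path = path[:i], path[i+1:]
		}
		if p != "" { // skip empty segments from doubled separators
			out = append(out, p)
		}
	}
	return out
}

func main() {
	fmt.Println(components(`C:\a\b/c`)) // [C: a b c]
}
```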
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/README.md b/vendor/github.com/docker/docker/pkg/sysinfo/README.md
new file mode 100644
index 00000000000..c1530cef0da
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/README.md
@@ -0,0 +1 @@
+SysInfo stores information about which features a kernel supports.
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go
new file mode 100644
index 00000000000..aeb1a3a8040
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows
+
+package sysinfo
+
+import (
+ "runtime"
+)
+
+// NumCPU returns the number of CPUs
+func NumCPU() int {
+ return runtime.NumCPU()
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go
new file mode 100644
index 00000000000..5eacd35121b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go
@@ -0,0 +1,43 @@
+// +build linux
+
+package sysinfo
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+// numCPU queries the system for the count of threads available
+// for use by this process.
+//
+// Issues two syscalls.
+// Returns 0 on errors. Use runtime.NumCPU in that case.
+func numCPU() int {
+ // Gets the affinity mask for a process: The very one invoking this function.
+ pid, _, _ := syscall.RawSyscall(syscall.SYS_GETPID, 0, 0, 0)
+
+ var mask [1024 / 64]uintptr
+ _, _, err := syscall.RawSyscall(syscall.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0])))
+ if err != 0 {
+ return 0
+ }
+
+ // For every available thread a bit is set in the mask.
+ ncpu := 0
+ for _, e := range mask {
+ if e == 0 {
+ continue
+ }
+ ncpu += int(popcnt(uint64(e)))
+ }
+ return ncpu
+}
+
+// NumCPU returns the number of CPUs which are currently online
+func NumCPU() int {
+ if ncpu := numCPU(); ncpu > 0 {
+ return ncpu
+ }
+ return runtime.NumCPU()
+}
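
Put differently, `numCPU` reads the `sched_getaffinity` mask and counts its set bits, one bit per schedulable thread. A standalone sketch of that counting step, with a made-up mask value:

```go
package main

import "fmt"

// onesCount clears the lowest set bit each iteration; every 1-bit in an
// affinity mask corresponds to one CPU this process may run on.
func onesCount(x uint64) int {
	n := 0
	for ; x != 0; x &= x - 1 {
		n++
	}
	return n
}

func main() {
	mask := uint64(0x4F) // hypothetical: CPUs 0-3 and 6 available
	fmt.Println(onesCount(mask)) // 5
}
```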
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go
new file mode 100644
index 00000000000..5077af7298c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sysinfo
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ getCurrentProcess = kernel32.NewProc("GetCurrentProcess")
+ getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask")
+)
+
+func numCPU() int {
+ // Gets the affinity mask for a process
+ var mask, sysmask uintptr
+ currentProcess, _, _ := getCurrentProcess.Call()
+ ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask)))
+ if ret == 0 {
+ return 0
+ }
+ // For every available thread a bit is set in the mask.
+ ncpu := int(popcnt(uint64(mask)))
+ return ncpu
+}
+
+// NumCPU returns the number of CPUs which are currently online
+func NumCPU() int {
+ if ncpu := numCPU(); ncpu > 0 {
+ return ncpu
+ }
+ return runtime.NumCPU()
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go
new file mode 100644
index 00000000000..dc71dbfb80c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go
@@ -0,0 +1,138 @@
+package sysinfo
+
+import "github.com/docker/docker/pkg/parsers"
+
+// SysInfo stores information about which features a kernel supports.
+// TODO Windows: Factor out platform specific capabilities.
+type SysInfo struct {
+ // Whether the kernel supports AppArmor or not
+ AppArmor bool
+ // Whether the kernel supports Seccomp or not
+ Seccomp bool
+
+ cgroupMemInfo
+ cgroupCPUInfo
+ cgroupBlkioInfo
+ cgroupCpusetInfo
+ cgroupPids
+
+ // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work
+ IPv4ForwardingDisabled bool
+
+ // Whether bridge-nf-call-iptables is supported or not
+ BridgeNFCallIPTablesDisabled bool
+
+ // Whether bridge-nf-call-ip6tables is supported or not
+ BridgeNFCallIP6TablesDisabled bool
+
+ // Whether the cgroup has the mountpoint of "devices" or not
+ CgroupDevicesEnabled bool
+}
+
+type cgroupMemInfo struct {
+ // Whether memory limit is supported or not
+ MemoryLimit bool
+
+ // Whether swap limit is supported or not
+ SwapLimit bool
+
+ // Whether soft limit is supported or not
+ MemoryReservation bool
+
+ // Whether OOM killer disable is supported or not
+ OomKillDisable bool
+
+ // Whether memory swappiness is supported or not
+ MemorySwappiness bool
+
+ // Whether kernel memory limit is supported or not
+ KernelMemory bool
+}
+
+type cgroupCPUInfo struct {
+ // Whether CPU shares is supported or not
+ CPUShares bool
+
+ // Whether CPU CFS(Completely Fair Scheduler) period is supported or not
+ CPUCfsPeriod bool
+
+ // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not
+ CPUCfsQuota bool
+}
+
+type cgroupBlkioInfo struct {
+ // Whether Block IO weight is supported or not
+ BlkioWeight bool
+
+ // Whether Block IO weight_device is supported or not
+ BlkioWeightDevice bool
+
+ // Whether Block IO read limit in bytes per second is supported or not
+ BlkioReadBpsDevice bool
+
+ // Whether Block IO write limit in bytes per second is supported or not
+ BlkioWriteBpsDevice bool
+
+ // Whether Block IO read limit in IO per second is supported or not
+ BlkioReadIOpsDevice bool
+
+ // Whether Block IO write limit in IO per second is supported or not
+ BlkioWriteIOpsDevice bool
+}
+
+type cgroupCpusetInfo struct {
+ // Whether Cpuset is supported or not
+ Cpuset bool
+
+ // Available Cpuset's cpus
+ Cpus string
+
+ // Available Cpuset's memory nodes
+ Mems string
+}
+
+type cgroupPids struct {
+ // Whether Pids Limit is supported or not
+ PidsLimit bool
+}
+
+// IsCpusetCpusAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.cpus set, `false` otherwise.
+// If error is not nil a parsing error occurred.
+func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) {
+ return isCpusetListAvailable(provided, c.Cpus)
+}
+
+// IsCpusetMemsAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.mems set, `false` otherwise.
+// If error is not nil a parsing error occurred.
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) {
+ return isCpusetListAvailable(provided, c.Mems)
+}
+
+func isCpusetListAvailable(provided, available string) (bool, error) {
+ parsedProvided, err := parsers.ParseUintList(provided)
+ if err != nil {
+ return false, err
+ }
+ parsedAvailable, err := parsers.ParseUintList(available)
+ if err != nil {
+ return false, err
+ }
+ for k := range parsedProvided {
+ if !parsedAvailable[k] {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+// popcnt returns the number of set bits in x (population count); used by NumCPU
+func popcnt(x uint64) (n byte) {
+ x -= (x >> 1) & 0x5555555555555555
+ x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
+ x += x >> 4
+ x &= 0x0f0f0f0f0f0f0f0f
+ x *= 0x0101010101010101
+ return byte(x >> 56)
+}
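
This `popcnt` is the classic SWAR population count. Newer Go (1.9+) ships `math/bits.OnesCount64`, which this vendored code predates; a quick cross-check sketch:

```go
package main

import (
	"fmt"
	"math/bits"
)

// popcnt is copied verbatim from the vendored code above.
func popcnt(x uint64) byte {
	x -= (x >> 1) & 0x5555555555555555
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f
	x *= 0x0101010101010101
	return byte(x >> 56)
}

func main() {
	for _, v := range []uint64{0, 1, 0xFF, 0xDEADBEEF} {
		fmt.Println(popcnt(v), bits.OnesCount64(v)) // the two columns always match
	}
}
```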
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go
new file mode 100644
index 00000000000..22ae0d95a9d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_freebsd.go
@@ -0,0 +1,7 @@
+package sysinfo
+
+// New returns an empty SysInfo for freebsd for now.
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ return sysInfo
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go
new file mode 100644
index 00000000000..8ba3ce4d39c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go
@@ -0,0 +1,246 @@
+package sysinfo
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "syscall"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/opencontainers/runc/libcontainer/cgroups"
+)
+
+const (
+ // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER.
+ SeccompModeFilter = uintptr(2)
+)
+
+func findCgroupMountpoints() (map[string]string, error) {
+ cgMounts, err := cgroups.GetCgroupMounts()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse cgroup information: %v", err)
+ }
+ mps := make(map[string]string)
+ for _, m := range cgMounts {
+ for _, ss := range m.Subsystems {
+ mps[ss] = m.Mountpoint
+ }
+ }
+ return mps, nil
+}
+
+// New returns a new SysInfo, using the filesystem to detect which features
+// the kernel supports. If `quiet` is `false` warnings are printed in logs
+// whenever an error occurs or misconfigurations are present.
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ cgMounts, err := findCgroupMountpoints()
+ if err != nil {
+ logrus.Warnf("Failed to parse cgroup information: %v", err)
+ } else {
+ sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet)
+ sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet)
+ sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet)
+ sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet)
+ sysInfo.cgroupPids = checkCgroupPids(quiet)
+ }
+
+ _, ok := cgMounts["devices"]
+ sysInfo.CgroupDevicesEnabled = ok
+
+ sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward")
+ sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables")
+ sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
+
+ // Check if AppArmor is supported.
+ if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) {
+ sysInfo.AppArmor = true
+ }
+
+ // Check if Seccomp is supported, via CONFIG_SECCOMP.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL {
+ // Make sure the kernel has CONFIG_SECCOMP_FILTER.
+ if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL {
+ sysInfo.Seccomp = true
+ }
+ }
+
+ return sysInfo
+}
+
+// checkCgroupMem reads the memory information from the memory cgroup mount point.
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo {
+ mountPoint, ok := cgMounts["memory"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Your kernel does not support cgroup memory limit")
+ }
+ return cgroupMemInfo{}
+ }
+
+ swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes")
+ if !quiet && !swapLimit {
+ logrus.Warn("Your kernel does not support swap memory limit.")
+ }
+ memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes")
+ if !quiet && !memoryReservation {
+ logrus.Warn("Your kernel does not support memory reservation.")
+ }
+ oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control")
+ if !quiet && !oomKillDisable {
+ logrus.Warn("Your kernel does not support oom control.")
+ }
+ memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness")
+ if !quiet && !memorySwappiness {
+ logrus.Warn("Your kernel does not support memory swappiness.")
+ }
+ kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes")
+ if !quiet && !kernelMemory {
+ logrus.Warn("Your kernel does not support kernel memory limit.")
+ }
+
+ return cgroupMemInfo{
+ MemoryLimit: true,
+ SwapLimit: swapLimit,
+ MemoryReservation: memoryReservation,
+ OomKillDisable: oomKillDisable,
+ MemorySwappiness: memorySwappiness,
+ KernelMemory: kernelMemory,
+ }
+}
+
+// checkCgroupCPU reads the cpu information from the cpu cgroup mount point.
+func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo {
+ mountPoint, ok := cgMounts["cpu"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find cpu cgroup in mounts")
+ }
+ return cgroupCPUInfo{}
+ }
+
+ cpuShares := cgroupEnabled(mountPoint, "cpu.shares")
+ if !quiet && !cpuShares {
+ logrus.Warn("Your kernel does not support cgroup cpu shares")
+ }
+
+ cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us")
+ if !quiet && !cpuCfsPeriod {
+ logrus.Warn("Your kernel does not support cgroup cfs period")
+ }
+
+ cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us")
+ if !quiet && !cpuCfsQuota {
+ logrus.Warn("Your kernel does not support cgroup cfs quotas")
+ }
+ return cgroupCPUInfo{
+ CPUShares: cpuShares,
+ CPUCfsPeriod: cpuCfsPeriod,
+ CPUCfsQuota: cpuCfsQuota,
+ }
+}
+
+// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point.
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo {
+ mountPoint, ok := cgMounts["blkio"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find blkio cgroup in mounts")
+ }
+ return cgroupBlkioInfo{}
+ }
+
+ weight := cgroupEnabled(mountPoint, "blkio.weight")
+ if !quiet && !weight {
+ logrus.Warn("Your kernel does not support cgroup blkio weight")
+ }
+
+ weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device")
+ if !quiet && !weightDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio weight_device")
+ }
+
+ readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device")
+ if !quiet && !readBpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device")
+ }
+
+ writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device")
+ if !quiet && !writeBpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device")
+ }
+ readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device")
+ if !quiet && !readIOpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device")
+ }
+
+ writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device")
+ if !quiet && !writeIOpsDevice {
+ logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device")
+ }
+ return cgroupBlkioInfo{
+ BlkioWeight: weight,
+ BlkioWeightDevice: weightDevice,
+ BlkioReadBpsDevice: readBpsDevice,
+ BlkioWriteBpsDevice: writeBpsDevice,
+ BlkioReadIOpsDevice: readIOpsDevice,
+ BlkioWriteIOpsDevice: writeIOpsDevice,
+ }
+}
+
+// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point.
+func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo {
+ mountPoint, ok := cgMounts["cpuset"]
+ if !ok {
+ if !quiet {
+ logrus.Warn("Unable to find cpuset cgroup in mounts")
+ }
+ return cgroupCpusetInfo{}
+ }
+
+ cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus"))
+ if err != nil {
+ return cgroupCpusetInfo{}
+ }
+
+ mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems"))
+ if err != nil {
+ return cgroupCpusetInfo{}
+ }
+
+ return cgroupCpusetInfo{
+ Cpuset: true,
+ Cpus: strings.TrimSpace(string(cpus)),
+ Mems: strings.TrimSpace(string(mems)),
+ }
+}
+
+// checkCgroupPids reads the pids information from the pids cgroup mount point.
+func checkCgroupPids(quiet bool) cgroupPids {
+ _, err := cgroups.FindCgroupMountpoint("pids")
+ if err != nil {
+ if !quiet {
+ logrus.Warn(err)
+ }
+ return cgroupPids{}
+ }
+
+ return cgroupPids{
+ PidsLimit: true,
+ }
+}
+
+func cgroupEnabled(mountPoint, name string) bool {
+ _, err := os.Stat(path.Join(mountPoint, name))
+ return err == nil
+}
+
+func readProcBool(path string) bool {
+ val, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false
+ }
+ return strings.TrimSpace(string(val)) == "1"
+}
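
A short usage sketch: `New(false)` probes the kernel once and warns about each missing feature, and because the cgroup structs are embedded, their fields and methods are promoted onto `SysInfo`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/sysinfo"
)

func main() {
	info := sysinfo.New(false) // quiet=false: log a warning per missing feature
	fmt.Println("apparmor:", info.AppArmor)
	fmt.Println("seccomp: ", info.Seccomp)
	fmt.Println("swap limit:", info.SwapLimit) // promoted from cgroupMemInfo

	// Promoted method from cgroupCpusetInfo.
	ok, err := info.IsCpusetCpusAvailable("0-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cpus 0-1 available:", ok)
}
```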
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go
new file mode 100644
index 00000000000..fae0fdffbbf
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux_test.go
@@ -0,0 +1,58 @@
+package sysinfo
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "testing"
+)
+
+func TestReadProcBool(t *testing.T) {
+ tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ procFile := filepath.Join(tmpDir, "read-proc-bool")
+ if err := ioutil.WriteFile(procFile, []byte("1"), 644); err != nil {
+ t.Fatal(err)
+ }
+
+ if !readProcBool(procFile) {
+ t.Fatal("expected proc bool to be true, got false")
+ }
+
+ if err := ioutil.WriteFile(procFile, []byte("0"), 644); err != nil {
+ t.Fatal(err)
+ }
+ if readProcBool(procFile) {
+ t.Fatal("expected proc bool to be false, got false")
+ }
+
+ if readProcBool(path.Join(tmpDir, "no-exist")) {
+ t.Fatal("should be false for non-existent entry")
+ }
+}
+
+func TestCgroupEnabled(t *testing.T) {
+ cgroupDir, err := ioutil.TempDir("", "cgroup-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(cgroupDir)
+
+ if cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be false")
+ }
+
+ if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 644); err != nil {
+ t.Fatal(err)
+ }
+
+ if !cgroupEnabled(cgroupDir, "test") {
+ t.Fatal("cgroupEnabled should be true")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go
new file mode 100644
index 00000000000..75a9c9bb2d5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_solaris.go
@@ -0,0 +1,119 @@
+// +build solaris,cgo
+
+package sysinfo
+
+import (
+ "bytes"
+ "os/exec"
+ "strconv"
+ "strings"
+)
+
+/*
+#cgo LDFLAGS: -llgrp
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/lgrp_user.h>
+int getLgrpCount() {
+ lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE;
+ uint_t nlgrps;
+
+ if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) {
+ return -1;
+ }
+ nlgrps = lgrp_nlgrps(lgrpcookie);
+ return nlgrps;
+}
+*/
+import "C"
+
+// IsCPUSharesAvailable returns whether the CPUShares setting is supported.
+// FSS must be set as the default scheduling class to support CPU shares.
+func IsCPUSharesAvailable() bool {
+ cmd := exec.Command("/usr/sbin/dispadmin", "-d")
+ outBuf := new(bytes.Buffer)
+ errBuf := new(bytes.Buffer)
+ cmd.Stderr = errBuf
+ cmd.Stdout = outBuf
+
+ if err := cmd.Run(); err != nil {
+ return false
+ }
+ return (strings.Contains(outBuf.String(), "FSS"))
+}
+
+// New returns a new SysInfo, using the filesystem to detect which features
+// the kernel supports.
+//NOTE Solaris: If we change the below capabilities be sure
+// to update verifyPlatformContainerSettings() in daemon_solaris.go
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ sysInfo.cgroupMemInfo = setCgroupMem(quiet)
+ sysInfo.cgroupCPUInfo = setCgroupCPU(quiet)
+ sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet)
+ sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet)
+
+ sysInfo.IPv4ForwardingDisabled = false
+
+ sysInfo.AppArmor = false
+
+ return sysInfo
+}
+
+// setCgroupMem reads the memory information for Solaris.
+func setCgroupMem(quiet bool) cgroupMemInfo {
+ return cgroupMemInfo{
+ MemoryLimit: true,
+ SwapLimit: true,
+ MemoryReservation: false,
+ OomKillDisable: false,
+ MemorySwappiness: false,
+ KernelMemory: false,
+ }
+}
+
+// setCgroupCPU reads the cpu information for Solaris.
+func setCgroupCPU(quiet bool) cgroupCPUInfo {
+ return cgroupCPUInfo{
+ CPUShares: true,
+ CPUCfsPeriod: false,
+ CPUCfsQuota: true,
+ }
+}
+
+// blkio switches are not supported in Solaris.
+func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
+ return cgroupBlkioInfo{
+ BlkioWeight: false,
+ BlkioWeightDevice: false,
+ }
+}
+
+// setCgroupCPUsetInfo reads the cpuset information for Solaris.
+func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo {
+ return cgroupCpusetInfo{
+ Cpuset: true,
+ Cpus: getCPUCount(),
+ Mems: getLgrpCount(),
+ }
+}
+
+func getCPUCount() string {
+ ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
+ if ncpus <= 0 {
+ return ""
+ }
+ return strconv.FormatInt(int64(ncpus), 10)
+}
+
+func getLgrpCount() string {
+ nlgrps := C.getLgrpCount()
+ if nlgrps <= 0 {
+ return ""
+ }
+ return strconv.FormatInt(int64(nlgrps), 10)
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go
new file mode 100644
index 00000000000..b61fbcf541e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_test.go
@@ -0,0 +1,26 @@
+package sysinfo
+
+import "testing"
+
+func TestIsCpusetListAvailable(t *testing.T) {
+ cases := []struct {
+ provided string
+ available string
+ res bool
+ err bool
+ }{
+ {"1", "0-4", true, false},
+ {"01,3", "0-4", true, false},
+ {"", "0-7", true, false},
+ {"1--42", "0-7", false, true},
+ {"1-42", "00-1,8,,9", false, true},
+ {"1,41-42", "43,45", false, false},
+ {"0-3", "", false, false},
+ }
+ for _, c := range cases {
+ r, err := isCpusetListAvailable(c.provided, c.available)
+ if (c.err && err == nil) && r != c.res {
+ t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, (c.err && err == nil), r)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 00000000000..8889318c398
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,7 @@
+package sysinfo
+
+// New returns an empty SysInfo for windows for now.
+func New(quiet bool) *SysInfo {
+ sysInfo := &SysInfo{}
+ return sysInfo
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go
new file mode 100644
index 00000000000..7637f12e1a7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go
@@ -0,0 +1,52 @@
+package system
+
+import (
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+var (
+ maxTime time.Time
+)
+
+func init() {
+ if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+ // This is a 64 bit timespec
+ // os.Chtimes limits time to the following
+ maxTime = time.Unix(0, 1<<63-1)
+ } else {
+ // This is a 32 bit timespec
+ maxTime = time.Unix(1<<31-1, 0)
+ }
+}
+
+// Chtimes changes the access time and modified time of a file at the given path
+func Chtimes(name string, atime time.Time, mtime time.Time) error {
+ unixMinTime := time.Unix(0, 0)
+ unixMaxTime := maxTime
+
+ // If the modified time is prior to the Unix Epoch, or after the
+ // end of Unix Time, os.Chtimes has undefined behavior; default to
+ // the Unix Epoch in that case.
+
+ if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
+ atime = unixMinTime
+ }
+
+ if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
+ mtime = unixMinTime
+ }
+
+ if err := os.Chtimes(name, atime, mtime); err != nil {
+ return err
+ }
+
+ // Take platform specific action for setting create time.
+ if err := setCTime(name, mtime); err != nil {
+ return err
+ }
+
+ return nil
+}
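
A minimal sketch of the clamping behavior (the temp-file name is arbitrary): a pre-epoch timestamp is pinned to the Unix epoch before `os.Chtimes` ever sees it:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes-demo")
	if err != nil {
		log.Fatal(err)
	}
	f.Close()
	defer os.Remove(f.Name())

	// 100s before the epoch: os.Chtimes behavior would be undefined,
	// so system.Chtimes substitutes time.Unix(0, 0).
	before := time.Unix(0, 0).Add(-100 * time.Second)
	if err := system.Chtimes(f.Name(), before, before); err != nil {
		log.Fatal(err)
	}
	fi, err := os.Stat(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fi.ModTime().UTC()) // 1970-01-01 00:00:00 +0000 UTC
}
```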
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go
new file mode 100644
index 00000000000..5c87df32a27
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_test.go
@@ -0,0 +1,94 @@
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+// prepareTempFile creates a temporary file in a temporary directory.
+func prepareTempFile(t *testing.T) (string, string) {
+ dir, err := ioutil.TempDir("", "docker-system-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file := filepath.Join(dir, "exist")
+ if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+ return file, dir
+}
+
+// TestChtimes tests Chtimes on a tempfile. Test only mTime, because aTime is OS dependent
+func TestChtimes(t *testing.T) {
+ file, dir := prepareTempFile(t)
+ defer os.RemoveAll(dir)
+
+ beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second)
+ unixEpochTime := time.Unix(0, 0)
+ afterUnixEpochTime := time.Unix(100, 0)
+ unixMaxTime := maxTime
+
+ // Test both aTime and mTime set to Unix Epoch
+ Chtimes(file, unixEpochTime, unixEpochTime)
+
+ f, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if f.ModTime() != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime())
+ }
+
+ // Test aTime before Unix Epoch and mTime set to Unix Epoch
+ Chtimes(file, beforeUnixEpochTime, unixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if f.ModTime() != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime())
+ }
+
+ // Test aTime set to Unix Epoch and mTime before Unix Epoch
+ Chtimes(file, unixEpochTime, beforeUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if f.ModTime() != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime())
+ }
+
+ // Test both aTime and mTime set to after Unix Epoch (valid time)
+ Chtimes(file, afterUnixEpochTime, afterUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if f.ModTime() != afterUnixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime())
+ }
+
+ // Test both aTime and mTime set to Unix max time
+ Chtimes(file, unixMaxTime, unixMaxTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) {
+ t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second))
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
new file mode 100644
index 00000000000..09d58bcbfdd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+import (
+ "time"
+)
+
+//setCTime will set the create time on a file. On Unix, the create
+//time is updated as a side effect of setting the modified time, so
+//no action is required.
+func setCTime(path string, ctime time.Time) error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go
new file mode 100644
index 00000000000..0aafe1d8450
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_unix_test.go
@@ -0,0 +1,91 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+ "syscall"
+ "testing"
+ "time"
+)
+
+// TestChtimesLinux tests Chtimes access time on a tempfile on non-Windows platforms
+func TestChtimesLinux(t *testing.T) {
+ file, dir := prepareTempFile(t)
+ defer os.RemoveAll(dir)
+
+ beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second)
+ unixEpochTime := time.Unix(0, 0)
+ afterUnixEpochTime := time.Unix(100, 0)
+ unixMaxTime := maxTime
+
+ // Test both aTime and mTime set to Unix Epoch
+ Chtimes(file, unixEpochTime, unixEpochTime)
+
+ f, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stat := f.Sys().(*syscall.Stat_t)
+ aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test aTime before Unix Epoch and mTime set to Unix Epoch
+ Chtimes(file, beforeUnixEpochTime, unixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stat = f.Sys().(*syscall.Stat_t)
+ aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test aTime set to Unix Epoch and mTime before Unix Epoch
+ Chtimes(file, unixEpochTime, beforeUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stat = f.Sys().(*syscall.Stat_t)
+ aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test both aTime and mTime set to after Unix Epoch (valid time)
+ Chtimes(file, afterUnixEpochTime, afterUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stat = f.Sys().(*syscall.Stat_t)
+ aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ if aTime != afterUnixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime)
+ }
+
+ // Test both aTime and mTime set to Unix max time
+ Chtimes(file, unixMaxTime, unixMaxTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ stat = f.Sys().(*syscall.Stat_t)
+ aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+ if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) {
+ t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second))
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
new file mode 100644
index 00000000000..29458684659
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
@@ -0,0 +1,27 @@
+// +build windows
+
+package system
+
+import (
+ "syscall"
+ "time"
+)
+
+//setCTime will set the create time on a file. On Windows, this requires
+//calling SetFileTime and explicitly including the create time.
+func setCTime(path string, ctime time.Time) error {
+ ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
+ pathp, e := syscall.UTF16PtrFromString(path)
+ if e != nil {
+ return e
+ }
+ h, e := syscall.CreateFile(pathp,
+ syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+ syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
+ if e != nil {
+ return e
+ }
+ defer syscall.Close(h)
+ c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
+ return syscall.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go
new file mode 100644
index 00000000000..be57558e1b3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows_test.go
@@ -0,0 +1,86 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "syscall"
+ "testing"
+ "time"
+)
+
+// TestChtimesWindows tests Chtimes access time on a tempfile on Windows
+func TestChtimesWindows(t *testing.T) {
+ file, dir := prepareTempFile(t)
+ defer os.RemoveAll(dir)
+
+ beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second)
+ unixEpochTime := time.Unix(0, 0)
+ afterUnixEpochTime := time.Unix(100, 0)
+ unixMaxTime := maxTime
+
+ // Test both aTime and mTime set to Unix Epoch
+ Chtimes(file, unixEpochTime, unixEpochTime)
+
+ f, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test aTime before Unix Epoch and mTime set to Unix Epoch
+ Chtimes(file, beforeUnixEpochTime, unixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test aTime set to Unix Epoch and mTime before Unix Epoch
+ Chtimes(file, unixEpochTime, beforeUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+ if aTime != unixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
+ }
+
+ // Test both aTime and mTime set to after Unix Epoch (valid time)
+ Chtimes(file, afterUnixEpochTime, afterUnixEpochTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+ if aTime != afterUnixEpochTime {
+ t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime)
+ }
+
+ // Test both aTime and mTime set to Unix max time
+ Chtimes(file, unixMaxTime, unixMaxTime)
+
+ f, err = os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+ if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) {
+ t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second))
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go
similarity index 66%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go
rename to vendor/github.com/docker/docker/pkg/system/errors.go
index 63045186fe8..288318985e3 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go
+++ b/vendor/github.com/docker/docker/pkg/system/errors.go
@@ -5,5 +5,6 @@ import (
)
var (
+ // ErrNotSupportedPlatform means the platform is not supported.
ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
)
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/docker/docker/pkg/system/events_windows.go
similarity index 83%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go
rename to vendor/github.com/docker/docker/pkg/system/events_windows.go
index 23f7c618bc6..04e2de78714 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/events_windows.go
@@ -8,11 +8,6 @@ import (
"unsafe"
)
-const (
- EVENT_ALL_ACCESS = 0x1F0003
- EVENT_MODIFY_STATUS = 0x0002
-)
-
var (
procCreateEvent = modkernel32.NewProc("CreateEventW")
procOpenEvent = modkernel32.NewProc("OpenEventW")
@@ -21,13 +16,14 @@ var (
procPulseEvent = modkernel32.NewProc("PulseEvent")
)
+// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
- var _p1 uint32 = 0
+ var _p1 uint32
if manualReset {
_p1 = 1
}
- var _p2 uint32 = 0
+ var _p2 uint32
if initialState {
_p2 = 1
}
@@ -40,9 +36,10 @@ func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool,
return
}
+// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
- var _p1 uint32 = 0
+ var _p1 uint32
if inheritHandle {
_p1 = 1
}
@@ -55,14 +52,17 @@ func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle sy
return
}
+// SetEvent implements win32 SetEvent func in golang.
func SetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procSetEvent)
}
+// ResetEvent implements win32 ResetEvent func in golang.
func ResetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procResetEvent)
}
+// PulseEvent implements win32 PulseEvent func in golang.
func PulseEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procPulseEvent)
}
diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go
new file mode 100644
index 00000000000..c14feb84965
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/filesys.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package system
+
+import (
+ "os"
+ "path/filepath"
+)
+
+// MkdirAll creates a directory named path along with any necessary parents,
+// with permission specified by attribute perm for all dir created.
+func MkdirAll(path string, perm os.FileMode) error {
+ return os.MkdirAll(path, perm)
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs.
+func IsAbs(path string) bool {
+ return filepath.IsAbs(path)
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
similarity index 67%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go
rename to vendor/github.com/docker/docker/pkg/system/filesys_windows.go
index 90b500608ef..16823d5517c 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go
@@ -4,7 +4,9 @@ package system
import (
"os"
+ "path/filepath"
"regexp"
+ "strings"
"syscall"
)
@@ -62,3 +64,19 @@ func MkdirAll(path string, perm os.FileMode) error {
}
return nil
}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon. This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+ if !filepath.IsAbs(path) {
+ if !strings.HasPrefix(path, string(os.PathSeparator)) {
+ return false
+ }
+ }
+ return true
+}
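
Since this `IsAbs` only compiles on Windows, the sketch below mirrors its decision logic so the behavior can be exercised on any platform; `isAbsWindows` is a stand-in name, not part of the package:

```go
package main

import (
	"fmt"
	"strings"
)

// isAbsWindows mirrors the vendored Windows IsAbs: drive-letter paths are
// absolute, and so are drive-less paths that start with a separator.
func isAbsWindows(path string) bool {
	if len(path) >= 3 && path[1] == ':' && (path[2] == '\\' || path[2] == '/') {
		return true // e.g. c:\windows
	}
	// filepath.IsAbs would say false here; docker treats it as absolute.
	return strings.HasPrefix(path, `\`) || strings.HasPrefix(path, "/")
}

func main() {
	fmt.Println(isAbsWindows(`c:\windows`))        // true
	fmt.Println(isAbsWindows(`\windows\system32`)) // true (drive-less but rooted)
	fmt.Println(isAbsWindows(`windows\system32`))  // false
}
```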
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/vendor/github.com/docker/docker/pkg/system/lstat.go
similarity index 74%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go
rename to vendor/github.com/docker/docker/pkg/system/lstat.go
index d0e43b37097..bd23c4d50b2 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go
+++ b/vendor/github.com/docker/docker/pkg/system/lstat.go
@@ -7,10 +7,10 @@ import (
)
// Lstat takes a path to a file and returns
-// a system.Stat_t type pertaining to that file.
+// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
-func Lstat(path string) (*Stat_t, error) {
+func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go
new file mode 100644
index 00000000000..062cf53bfe6
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix_test.go
@@ -0,0 +1,30 @@
+// +build linux freebsd
+
+package system
+
+import (
+ "os"
+ "testing"
+)
+
+// TestLstat tests Lstat for existing and non existing files
+func TestLstat(t *testing.T) {
+ file, invalid, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ statFile, err := Lstat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if statFile == nil {
+ t.Fatal("returned empty stat for existing file")
+ }
+
+ statInvalid, err := Lstat(invalid)
+ if err == nil {
+ t.Fatal("did not return error for non-existing file")
+ }
+ if statInvalid != nil {
+ t.Fatal("returned non-nil stat for non-existing file")
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
similarity index 77%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go
rename to vendor/github.com/docker/docker/pkg/system/lstat_windows.go
index eee1be26eb5..49e87eb40ba 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go
@@ -6,21 +6,17 @@ import (
"os"
)
-// Some explanation for my own sanity, and hopefully maintainers in the
-// future.
-//
// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
// Note the Linux version uses fromStatT to do the copy back,
// but that is not strictly necessary when already in an OS-specific module.
-
-func Lstat(path string) (*Stat_t, error) {
+func Lstat(path string) (*StatT, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, err
}
- return &Stat_t{
+ return &StatT{
name: fi.Name(),
size: fi.Size(),
mode: fi.Mode(),
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go
rename to vendor/github.com/docker/docker/pkg/system/meminfo.go
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
similarity index 85%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go
rename to vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
index 41f2bab6037..385f1d5e735 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
@@ -2,21 +2,16 @@ package system
import (
"bufio"
- "errors"
"io"
"os"
"strconv"
"strings"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units"
-)
-
-var (
- ErrMalformed = errors.New("malformed file")
+ "github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
-// MemInfo type.
+// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
file, err := os.Open("/proc/meminfo")
if err != nil {
@@ -27,8 +22,7 @@ func ReadMemInfo() (*MemInfo, error) {
}
// parseMemInfo parses the /proc/meminfo file into
-// a MemInfo object given a io.Reader to the file.
-//
+// a MemInfo object given an io.Reader to the file.
// Throws error if there are problems reading from the file
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
new file mode 100644
index 00000000000..313c601b125
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
@@ -0,0 +1,128 @@
+// +build solaris,cgo
+
+package system
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <stdio.h>
+// #include <kstat.h>
+// #include <sys/swap.h>
+// #include <sys/param.h>
+// struct swaptable *allocSwaptable(int num) {
+// struct swaptable *st;
+// struct swapent *swapent;
+// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+// swapent = st->swt_ent;
+// for (int i = 0; i < num; i++,swapent++) {
+// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+// }
+// st->swt_n = num;
+// return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+// struct swapent *swapent = st->swt_ent;
+// for (int i = 0; i < st->swt_n; i++,swapent++) {
+// free(swapent->ste_path);
+// }
+// free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+// return ent[i];
+// }
+// int64_t getPpKernel() {
+// int64_t pp_kernel = 0;
+// kstat_ctl_t *ksc;
+// kstat_t *ks;
+// kstat_named_t *knp;
+// kid_t kid;
+//
+// if ((ksc = kstat_open()) == NULL) {
+// return -1;
+// }
+// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+// return -1;
+// }
+// if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+// return -1;
+// }
+// switch (knp->data_type) {
+// case KSTAT_DATA_UINT64:
+// pp_kernel = knp->value.ui64;
+// break;
+// case KSTAT_DATA_UINT32:
+// pp_kernel = knp->value.ui32;
+// break;
+// }
+// pp_kernel *= sysconf(_SC_PAGESIZE);
+// return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, the same as prtconf.
+func getTotalMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_PHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+ pagesize := C.sysconf(C._SC_PAGESIZE)
+ npages := C.sysconf(C._SC_AVPHYS_PAGES)
+ return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+ ppKernel := C.getPpKernel()
+ MemTotal := getTotalMem()
+ MemFree := getFreeMem()
+ SwapTotal, SwapFree, err := getSysSwap()
+
+ if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+ SwapFree < 0 {
+ return nil, fmt.Errorf("Error getting system memory info %v\n", err)
+ }
+
+ meminfo := &MemInfo{}
+ // Total memory is total physical memory less the memory locked by the kernel
+ meminfo.MemTotal = MemTotal - int64(ppKernel)
+ meminfo.MemFree = MemFree
+ meminfo.SwapTotal = SwapTotal
+ meminfo.SwapFree = SwapFree
+
+ return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+ var tSwap int64
+ var fSwap int64
+ var diskblksPerPage int64
+ num, err := C.swapctl(C.SC_GETNSWP, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ st := C.allocSwaptable(num)
+ _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+ if err != nil {
+ C.freeSwaptable(st)
+ return -1, -1, err
+ }
+
+ diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+ for i := 0; i < int(num); i++ {
+ swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+ tSwap += int64(swapent.ste_pages) * diskblksPerPage
+ fSwap += int64(swapent.ste_free) * diskblksPerPage
+ }
+ C.freeSwaptable(st)
+ return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go
new file mode 100644
index 00000000000..44f5562882d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unix_test.go
@@ -0,0 +1,40 @@
+// +build linux freebsd
+
+package system
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/docker/go-units"
+)
+
+// TestMemInfo tests parseMemInfo with a static meminfo string
+func TestMemInfo(t *testing.T) {
+ const input = `
+ MemTotal: 1 kB
+ MemFree: 2 kB
+ SwapTotal: 3 kB
+ SwapFree: 4 kB
+ Malformed1:
+ Malformed2: 1
+ Malformed3: 2 MB
+ Malformed4: X kB
+ `
+ meminfo, err := parseMemInfo(strings.NewReader(input))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if meminfo.MemTotal != 1*units.KiB {
+ t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
+ }
+ if meminfo.MemFree != 2*units.KiB {
+ t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
+ }
+ if meminfo.SwapTotal != 3*units.KiB {
+ t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
+ }
+ if meminfo.SwapFree != 4*units.KiB {
+ t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
new file mode 100644
index 00000000000..3ce019dffdd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+ return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
similarity index 100%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go
rename to vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go
similarity index 70%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go
rename to vendor/github.com/docker/docker/pkg/system/mknod.go
index 26617eb08f4..73958182b4e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go
+++ b/vendor/github.com/docker/docker/pkg/system/mknod.go
@@ -7,14 +7,16 @@ import (
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
-// with attributes specified by mode and dev
+// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return syscall.Mknod(path, mode, dev)
}
+// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
+// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
-// then the top 12 bits of the minor
+// then the top 12 bits of the minor.
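+// As a worked example (illustrative, not from the upstream file): major 8,
+// minor 1 (the usual 8:1 pair for /dev/sda1) encodes to
+// ((1 & 0xfff00) << 12) | ((8 & 0xfff) << 8) | (1 & 0xff) = 0x801.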
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
similarity index 72%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go
rename to vendor/github.com/docker/docker/pkg/system/mknod_windows.go
index 1811542ab3f..2e863c0215b 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
@@ -2,10 +2,12 @@
package system
+// Mknod is not implemented on Windows.
func Mknod(path string, mode uint32, dev int) error {
return ErrNotSupportedPlatform
}
+// Mkdev is not implemented on Windows.
func Mkdev(major int64, minor int64) uint32 {
panic("Mkdev not implemented on Windows.")
}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 00000000000..c607c4db09f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,14 @@
+// +build !windows
+
+package system
+
+// DefaultPathEnv is the Unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// (':') character.
+const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive. This is a no-op on Linux.
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ return path, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go
new file mode 100644
index 00000000000..cbfe2c1576c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package system
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
+// the container. Docker has no context of what the default path should be.
+const DefaultPathEnv = ""
+
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. The path is also translated to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C: --> Fail
+// C:\ --> \
+// a --> a
+// /a --> \a
+// d:\ --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
+ if len(path) == 2 && string(path[1]) == ":" {
+ return "", fmt.Errorf("No relative path specified in %q", path)
+ }
+ if !filepath.IsAbs(path) || len(path) < 2 {
+ return filepath.FromSlash(path), nil
+ }
+ if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+ return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+ }
+ return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows_test.go b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go
new file mode 100644
index 00000000000..eccb26aaea7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_windows_test.go
@@ -0,0 +1,78 @@
+// +build windows
+
+package system
+
+import "testing"
+
+// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
+func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
+ // Fails if not C drive.
+ path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
+ if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") {
+ t.Fatalf("Expected error for d:")
+ }
+
+ // Single character is unchanged
+ if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
+ t.Fatalf("Single character should pass")
+ }
+ if path != "z" {
+ t.Fatalf("Single character should be unchanged")
+ }
+
+ // Two characters without colon is unchanged
+ if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
+ t.Fatalf("2 characters without colon should pass")
+ }
+ if path != "AB" {
+ t.Fatalf("2 characters without colon should be unchanged")
+ }
+
+ // Abs path without drive letter
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
+ t.Fatalf("abs path no drive letter should pass")
+ }
+ if path != `\l` {
+ t.Fatalf("abs path without drive letter should be unchanged")
+ }
+
+ // Abs path without drive letter, linux style
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
+ t.Fatalf("abs path no drive letter linux style should pass")
+ }
+ if path != `\l` {
+ t.Fatalf("abs path without drive letter linux failed %s", path)
+ }
+
+ // Drive-colon should be stripped
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
+ t.Fatalf("An absolute path should pass")
+ }
+ if path != `\` {
+ t.Fatalf(`An absolute path should have been shortened to \ %s`, path)
+ }
+
+ // Verify with a linux-style path
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
+ t.Fatalf("An absolute path should pass")
+ }
+ if path != `\` {
+ t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path)
+ }
+
+ // Failure on c:
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
+ t.Fatalf("c: should fail")
+ }
+ if err.Error() != `No relative path specified in "c:"` {
+ t.Fatalf("Unexpected error %v for path %s", err, path)
+ }
+
+ // Failure on d:
+ if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
+ t.Fatalf("c: should fail")
+ }
+ if err.Error() != `No relative path specified in "d:"` {
+ t.Fatalf("Unexpected error %v for path %s", err, path)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat.go b/vendor/github.com/docker/docker/pkg/system/stat.go
new file mode 100644
index 00000000000..087034c5ec5
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat.go
@@ -0,0 +1,53 @@
+// +build !windows
+
+package system
+
+import (
+ "syscall"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like permission, owner, group, size, etc about a file.
+type StatT struct {
+ mode uint32
+ uid uint32
+ gid uint32
+ rdev uint64
+ size int64
+ mtim syscall.Timespec
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() uint32 {
+ return s.mode
+}
+
+// UID returns file's user id of owner.
+func (s StatT) UID() uint32 {
+ return s.uid
+}
+
+// GID returns file's group id of owner.
+func (s StatT) GID() uint32 {
+ return s.gid
+}
+
+// Rdev returns file's device ID (if it's special file).
+func (s StatT) Rdev() uint64 {
+ return s.rdev
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mtim returns file's last modification time.
+func (s StatT) Mtim() syscall.Timespec {
+ return s.mtim
+}
+
+// GetLastModification returns file's last modification time.
+func (s StatT) GetLastModification() syscall.Timespec {
+ return s.Mtim()
+}
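+
+// Illustrative usage (not part of the upstream file): stat a path and read
+// the metadata through the accessors.
+//
+//	st, err := system.Stat("/etc/hosts")
+//	if err == nil {
+//		fmt.Println(st.Size(), st.UID(), st.GID())
+//	}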
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
similarity index 78%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
rename to vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
index 4b2198b3aab..d0fb6f15190 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
@@ -5,8 +5,8 @@ import (
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
-func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
- return &Stat_t{size: s.Size,
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
@@ -18,7 +18,7 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
// a system.Stat_t type pertaining to that file.
//
// Throws an error if the file does not exist
-func Stat(path string) (*Stat_t, error) {
+func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
similarity index 59%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
rename to vendor/github.com/docker/docker/pkg/system/stat_linux.go
index 80262d95192..8b1eded1387 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go
@@ -5,8 +5,8 @@ import (
)
// fromStatT converts a syscall.Stat_t type to a system.Stat_t type
-func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
- return &Stat_t{size: s.Size,
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
@@ -14,17 +14,17 @@ func fromStatT(s *syscall.Stat_t) (*Stat_t, error) {
mtim: s.Mtim}, nil
}
-// FromStatT exists only on linux, and loads a system.Stat_t from a
+// FromStatT exists only on linux, and loads a system.StatT from a
// syscall.Stat_t.
-func FromStatT(s *syscall.Stat_t) (*Stat_t, error) {
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
-// a system.Stat_t type pertaining to that file.
+// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
-func Stat(path string) (*Stat_t, error) {
+func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
new file mode 100644
index 00000000000..3c3b71fb219
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
@@ -0,0 +1,15 @@
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
new file mode 100644
index 00000000000..0216985a252
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go
@@ -0,0 +1,34 @@
+// +build solaris
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtim}, nil
+}
+
+// FromStatT loads a system.StatT from a syscall.Stat_t.
+func FromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return fromStatT(s)
+}
+
+// Stat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Throws an error if the file does not exist
+func Stat(path string) (*StatT, error) {
+ s := &syscall.Stat_t{}
+ if err := syscall.Stat(path, s); err != nil {
+ return nil, err
+ }
+ return fromStatT(s)
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go
new file mode 100644
index 00000000000..dee8d30a199
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unix_test.go
@@ -0,0 +1,39 @@
+// +build linux freebsd
+
+package system
+
+import (
+ "os"
+ "syscall"
+ "testing"
+)
+
+// TestFromStatT tests fromStatT for a tempfile
+func TestFromStatT(t *testing.T) {
+ file, _, _, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ stat := &syscall.Stat_t{}
+ err := syscall.Lstat(file, stat)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ s, err := fromStatT(stat)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if stat.Mode != s.Mode() {
+ t.Fatal("got invalid mode")
+ }
+ if stat.Uid != s.UID() {
+ t.Fatal("got invalid uid")
+ }
+ if stat.Gid != s.GID() {
+ t.Fatal("got invalid gid")
+ }
+ if stat.Rdev != s.Rdev() {
+ t.Fatal("got invalid rdev")
+ }
+ if stat.Mtim != s.Mtim() {
+ t.Fatal("got invalid mtim")
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
new file mode 100644
index 00000000000..f53e9de4d1a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go
@@ -0,0 +1,17 @@
+// +build !linux,!windows,!freebsd,!solaris,!openbsd
+
+package system
+
+import (
+ "syscall"
+)
+
+// fromStatT creates a system.StatT type from a syscall.Stat_t type
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+ return &StatT{size: s.Size,
+ mode: uint32(s.Mode),
+ uid: s.Uid,
+ gid: s.Gid,
+ rdev: uint64(s.Rdev),
+ mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
new file mode 100644
index 00000000000..39490c625c0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package system
+
+import (
+ "os"
+ "time"
+)
+
+// StatT type contains status of a file. It contains metadata
+// like name, permission, size, etc about a file.
+type StatT struct {
+ name string
+ size int64
+ mode os.FileMode
+ modTime time.Time
+ isDir bool
+}
+
+// Name returns file's name.
+func (s StatT) Name() string {
+ return s.name
+}
+
+// Size returns file's size.
+func (s StatT) Size() int64 {
+ return s.size
+}
+
+// Mode returns file's permission mode.
+func (s StatT) Mode() os.FileMode {
+ return s.mode
+}
+
+// ModTime returns file's last modification time.
+func (s StatT) ModTime() time.Time {
+ return s.modTime
+}
+
+// IsDir returns whether file is actually a directory.
+func (s StatT) IsDir() bool {
+ return s.isDir
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
new file mode 100644
index 00000000000..3ae91284684
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go
@@ -0,0 +1,17 @@
+// +build linux freebsd
+
+package system
+
+import "syscall"
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall.
+func Unmount(dest string) error {
+ return syscall.Unmount(dest, 0)
+}
+
+// CommandLineToArgv should not be used on Unix.
+// It simply returns commandLine as the only element of the returned array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ return []string{commandLine}, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
new file mode 100644
index 00000000000..f5f2d569417
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -0,0 +1,103 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+
+ "github.com/Sirupsen/logrus"
+)
+
+var (
+ ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
+ procGetVersionExW = modkernel32.NewProc("GetVersionExW")
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+ Version uint32
+ MajorVersion uint8
+ MinorVersion uint8
+ Build uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+ OSVersionInfoSize uint32
+ MajorVersion uint32
+ MinorVersion uint32
+ BuildNumber uint32
+ PlatformID uint32
+ CSDVersion [128]uint16
+ ServicePackMajor uint16
+ ServicePackMinor uint16
+ SuiteMask uint16
+ ProductType byte
+ Reserve byte
+}
+
+// GetOSVersion gets the operating system version on Windows. Note that
+// docker.exe must be manifested to get the correct version information.
+func GetOSVersion() OSVersion {
+ var err error
+ osv := OSVersion{}
+ osv.Version, err = syscall.GetVersion()
+ if err != nil {
+ // GetVersion never fails.
+ panic(err)
+ }
+ osv.MajorVersion = uint8(osv.Version & 0xFF)
+ osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+ osv.Build = uint16(osv.Version >> 16)
+ return osv
+}
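+
+// As a worked example (illustrative): a packed value of 0x3839000A decodes
+// to major 0x0A (10), minor 0x00 (0) and build 0x3839 (14393), i.e. a
+// Windows 10 build 14393 host.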
+
+// IsWindowsClient returns true if the SKU is client
+func IsWindowsClient() bool {
+ osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
+ r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
+ if r1 == 0 {
+ logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
+ return false
+ }
+ const verNTWorkstation = 0x00000001
+ return osviex.ProductType == verNTWorkstation
+}
+
+// Unmount is a platform-specific helper function to call
+// the unmount syscall. Not supported on Windows
+func Unmount(dest string) error {
+ return nil
+}
+
+// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array.
+func CommandLineToArgv(commandLine string) ([]string, error) {
+ var argc int32
+
+ argsPtr, err := syscall.UTF16PtrFromString(commandLine)
+ if err != nil {
+ return nil, err
+ }
+
+ argv, err := syscall.CommandLineToArgv(argsPtr, &argc)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv))))
+
+ newArgs := make([]string, argc)
+ for i, v := range (*argv)[:argc] {
+ newArgs[i] = string(syscall.UTF16ToString((*v)[:]))
+ }
+
+ return newArgs, nil
+}
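+
+// Illustrative usage (assumed input, not from the upstream file):
+//
+//	args, err := CommandLineToArgv(`ping -n 4 "contoso.com"`)
+//	// on success args == []string{"ping", "-n", "4", "contoso.com"}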
+
+// HasWin32KSupport determines whether containers that depend on win32k can
+// run on this machine. Win32k is the driver used to implement windowing.
+func HasWin32KSupport() bool {
+ // For now, check for ntuser API support on the host. In the future, a host
+ // may support win32k in containers even if the host does not support ntuser
+ // APIs.
+ return ntuserApiset.Load() == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go
new file mode 100644
index 00000000000..4886b2b9b4a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows_test.go
@@ -0,0 +1,9 @@
+package system
+
+import "testing"
+
+func TestHasWin32KSupport(t *testing.T) {
+ s := HasWin32KSupport() // make sure this doesn't panic
+
+ t.Logf("win32k: %v", s) // will be different on different platforms -- informative only
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go
similarity index 61%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
rename to vendor/github.com/docker/docker/pkg/system/umask.go
index fddbecd3903..3d0146b01ad 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go
+++ b/vendor/github.com/docker/docker/pkg/system/umask.go
@@ -6,6 +6,8 @@ import (
"syscall"
)
+// Umask sets current process's file mode creation mask to newmask
+// and returns oldmask.
func Umask(newmask int) (oldmask int, err error) {
return syscall.Umask(newmask), nil
}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
similarity index 76%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
rename to vendor/github.com/docker/docker/pkg/system/umask_windows.go
index 3be563f89e6..13f1de1769c 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go
@@ -2,6 +2,7 @@
package system
+// Umask is not supported on the windows platform.
func Umask(newmask int) (oldmask int, err error) {
// should not be called on cli code path
return 0, ErrNotSupportedPlatform
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
similarity index 56%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
rename to vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
index 4c6002fe8e2..0a16197544d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_darwin.go
@@ -2,10 +2,7 @@ package system
import "syscall"
+// LUtimesNano is not supported on the darwin platform.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
-
-func UtimesNano(path string, ts []syscall.Timespec) error {
- return syscall.UtimesNano(path, ts)
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
similarity index 68%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
rename to vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
index ceaa044c1c5..e2eac3b553e 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
@@ -5,6 +5,8 @@ import (
"unsafe"
)
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It is used for symlinks because syscall.UtimesNano doesn't support the NOFOLLOW flag at the moment.
func LUtimesNano(path string, ts []syscall.Timespec) error {
var _path *byte
_path, err := syscall.BytePtrFromString(path)
@@ -18,7 +20,3 @@ func LUtimesNano(path string, ts []syscall.Timespec) error {
return nil
}
-
-func UtimesNano(path string, ts []syscall.Timespec) error {
- return syscall.UtimesNano(path, ts)
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
new file mode 100644
index 00000000000..fc8a1aba95c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go
@@ -0,0 +1,26 @@
+package system
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// LUtimesNano is used to change the access and modification times of the specified path.
+// It is used for symlinks because syscall.UtimesNano doesn't support the NOFOLLOW flag at the moment.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+ // These are not currently available in syscall
+ atFdCwd := -100
+ atSymLinkNoFollow := 0x100
+
+ var _path *byte
+ _path, err := syscall.BytePtrFromString(path)
+ if err != nil {
+ return err
+ }
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go
new file mode 100644
index 00000000000..1ee0d099f91
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix_test.go
@@ -0,0 +1,68 @@
+// +build linux freebsd
+
+package system
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "syscall"
+ "testing"
+)
+
+// prepareFiles creates files for testing in the temp directory
+func prepareFiles(t *testing.T) (string, string, string, string) {
+ dir, err := ioutil.TempDir("", "docker-system-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ file := filepath.Join(dir, "exist")
+ if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ invalid := filepath.Join(dir, "doesnt-exist")
+
+ symlink := filepath.Join(dir, "symlink")
+ if err := os.Symlink(file, symlink); err != nil {
+ t.Fatal(err)
+ }
+
+ return file, invalid, symlink, dir
+}
+
+func TestLUtimesNano(t *testing.T) {
+ file, invalid, symlink, dir := prepareFiles(t)
+ defer os.RemoveAll(dir)
+
+ before, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ ts := []syscall.Timespec{{0, 0}, {0, 0}}
+ if err := LUtimesNano(symlink, ts); err != nil {
+ t.Fatal(err)
+ }
+
+ symlinkInfo, err := os.Lstat(symlink)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the symlink should be different")
+ }
+
+ fileInfo, err := os.Stat(file)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
+ t.Fatal("The modification time of the file should be same")
+ }
+
+ if err := LUtimesNano(invalid, ts); err == nil {
+ t.Fatal("Doesn't return an error on a non-existing file")
+ }
+}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
similarity index 63%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go
rename to vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
index adf2734f277..50c3a04364d 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
@@ -4,10 +4,7 @@ package system
import "syscall"
+// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}
-
-func UtimesNano(path string, ts []syscall.Timespec) error {
- return ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
similarity index 82%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
rename to vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
index 00edb201b58..d2e2c057998 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
@@ -5,7 +5,9 @@ import (
"unsafe"
)
-// Returns a nil slice and nil error if the xattr is not set
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
@@ -36,6 +38,8 @@ func Lgetxattr(path string, attr string) ([]byte, error) {
var _zero uintptr
+// Lsetxattr sets the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
similarity index 66%
rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
rename to vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
index 0060c167dc2..0114f2227cf 100644
--- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go
+++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
@@ -2,10 +2,12 @@
package system
+// Lgetxattr is not supported on platforms other than linux.
func Lgetxattr(path string, attr string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
}
+// Lsetxattr is not supported on platforms other than linux.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return ErrNotSupportedPlatform
}
diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
new file mode 100644
index 00000000000..d580584d61d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
@@ -0,0 +1,66 @@
+// Package tailfile provides helper functions to read the last n lines of
+// any ReadSeeker.
+package tailfile
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+)
+
+const blockSize = 1024
+
+var eol = []byte("\n")
+
+// ErrNonPositiveLinesNumber is the error returned when the requested number of lines is not positive.
+var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
+
+// TailFile returns the last n lines of reader f (which could be a file).
+func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
+ if n <= 0 {
+ return nil, ErrNonPositiveLinesNumber
+ }
+ size, err := f.Seek(0, os.SEEK_END)
+ if err != nil {
+ return nil, err
+ }
+ block := -1
+ var data []byte
+ var cnt int
+ for {
+ var b []byte
+ step := int64(block * blockSize)
+ left := size + step // how many bytes to beginning
+ if left < 0 {
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ return nil, err
+ }
+ b = make([]byte, blockSize+left)
+ if _, err := f.Read(b); err != nil {
+ return nil, err
+ }
+ data = append(b, data...)
+ break
+ } else {
+ b = make([]byte, blockSize)
+ if _, err := f.Seek(step, os.SEEK_END); err != nil {
+ return nil, err
+ }
+ if _, err := f.Read(b); err != nil {
+ return nil, err
+ }
+ data = append(b, data...)
+ }
+ cnt += bytes.Count(b, eol)
+ if cnt > n {
+ break
+ }
+ block--
+ }
+ lines := bytes.Split(data, eol)
+ if n < len(lines) {
+ return lines[len(lines)-n-1 : len(lines)-1], nil
+ }
+ return lines[:len(lines)-1], nil
+}
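+
+// Illustrative usage (hypothetical file name): read the last two lines of a
+// log file.
+//
+//	f, err := os.Open("app.log")
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer f.Close()
+//	lines, err := TailFile(f, 2)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, line := range lines {
+//		fmt.Printf("%s\n", line)
+//	}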
diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go
new file mode 100644
index 00000000000..31217c036cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile_test.go
@@ -0,0 +1,148 @@
+package tailfile
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+func TestTailFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+third line
+fourth line
+fifth line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+last first line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+next first line
+next second line
+next third line
+next fourth line
+next fifth line
+last second line
+last third line
+last fourth line
+last fifth line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"last fourth line", "last fifth line"}
+ res, err := TailFile(f, 2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, l := range res {
+ t.Logf("%s", l)
+ if expected[i] != string(l) {
+ t.Fatalf("Expected line %s, got %s", expected[i], l)
+ }
+ }
+}
+
+func TestTailFileManyLines(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ expected := []string{"first line", "second line"}
+ res, err := TailFile(f, 10000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i, l := range res {
+ t.Logf("%s", l)
+ if expected[i] != string(l) {
+ t.Fatalf("Expected line %s, got %s", expected[i], l)
+ }
+ }
+}
+
+func TestTailEmptyFile(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ res, err := TailFile(f, 10000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(res) != 0 {
+ t.Fatal("Must be empty slice from empty file")
+ }
+}
+
+func TestTailNegativeN(t *testing.T) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ testFile := []byte(`first line
+second line
+truncated line`)
+ if _, err := f.Write(testFile); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+ if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber {
+ t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err)
+ }
+}
+
+func BenchmarkTail(b *testing.B) {
+ f, err := ioutil.TempFile("", "tail-test")
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer f.Close()
+ defer os.RemoveAll(f.Name())
+ for i := 0; i < 10000; i++ {
+ if _, err := f.Write([]byte("tailfile pretty interesting line\n")); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := TailFile(f, 1000); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
new file mode 100644
index 00000000000..b42983e9849
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
@@ -0,0 +1,21 @@
+package tarsum
+
+// BuilderContext is an interface extending TarSum by adding the Remove method.
+// In general there was concern about adding this method to TarSum itself,
+// so instead it is being added just to "BuilderContext", which will then
+// only be used during the .dockerignore file processing
+// - see builder/evaluator.go
+type BuilderContext interface {
+ TarSum
+ Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+ for i, fis := range bc.sums {
+ if fis.Name() == filename {
+ bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+ // Note, we don't just return because there could be
+ // more than one with this name
+ }
+ }
+}
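+
+// Illustrative usage (hypothetical names): drop an entry from the sums after
+// the stream has been fully read, then compute the final checksum.
+//
+//	ts, _ := NewTarSum(reader, true, Version0)
+//	io.Copy(ioutil.Discard, ts) // populate the sums
+//	ts.(BuilderContext).Remove(".dockerignore")
+//	checksum := ts.Sum(nil)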
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go
new file mode 100644
index 00000000000..719f72895d7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context_test.go
@@ -0,0 +1,63 @@
+package tarsum
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// Try to remove a tarsum (in the BuilderContext) that does not exist; this should not change anything.
+func TestTarSumRemoveNonExistent(t *testing.T) {
+ filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+ reader, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ts, err := NewTarSum(reader, false, Version0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read and discard bytes so that it populates sums
+ _, err = io.Copy(ioutil.Discard, ts)
+ if err != nil {
+ t.Errorf("failed to read from %s: %s", filename, err)
+ }
+
+ expected := len(ts.GetSums())
+
+ ts.(BuilderContext).Remove("")
+ ts.(BuilderContext).Remove("Anything")
+
+ if len(ts.GetSums()) != expected {
+ t.Fatalf("Expected %v sums, go %v.", expected, ts.GetSums())
+ }
+}
+
+// Remove a tarsum (in the BuilderContext)
+func TestTarSumRemove(t *testing.T) {
+ filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
+ reader, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ts, err := NewTarSum(reader, false, Version0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read and discard bytes so that it populates sums
+ _, err = io.Copy(ioutil.Discard, ts)
+ if err != nil {
+ t.Errorf("failed to read from %s: %s", filename, err)
+ }
+
+ expected := len(ts.GetSums()) - 1
+
+ ts.(BuilderContext).Remove("etc/sudoers")
+
+ if len(ts.GetSums()) != expected {
+ t.Fatalf("Expected %v sums, go %v.", expected, len(ts.GetSums()))
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
new file mode 100644
index 00000000000..5abf5e7ba39
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,126 @@
+package tarsum
+
+import "sort"
+
+// FileInfoSumInterface provides an interface for accessing file checksum
+// information within a tar file. This info is accessed through an interface
+// so the actual name and sum cannot be meddled with.
+type FileInfoSumInterface interface {
+ // File name
+ Name() string
+ // Checksum of this particular file and its headers
+ Sum() string
+ // Position of file in the tar
+ Pos() int64
+}
+
+type fileInfoSum struct {
+ name string
+ sum string
+ pos int64
+}
+
+func (fis fileInfoSum) Name() string {
+ return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+ return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+ return fis.pos
+}
+
+// FileInfoSums provides a list of FileInfoSumInterfaces.
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name.
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+ for i := range fis {
+ if fis[i].Name() == name {
+ return fis[i]
+ }
+ }
+ return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names.
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+ f := FileInfoSums{}
+ for i := range fis {
+ if fis[i].Name() == name {
+ f = append(f, fis[i])
+ }
+ }
+ return f
+}
+
+// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+ seen := make(map[string]int, len(fis)) // allocate early. no need to grow this map.
+ for i := range fis {
+ f := fis[i]
+ if _, ok := seen[f.Name()]; ok {
+ dups = append(dups, f)
+ } else {
+ seen[f.Name()] = 0
+ }
+ }
+ return dups
+}
+
+// Len returns the size of the FileInfoSums.
+func (fis FileInfoSums) Len() int { return len(fis) }
+
+// Swap swaps two FileInfoSum values in a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+// SortByPos sorts FileInfoSums content by position.
+func (fis FileInfoSums) SortByPos() {
+ sort.Sort(byPos{fis})
+}
+
+// SortByNames sorts FileInfoSums content by name.
+func (fis FileInfoSums) SortByNames() {
+ sort.Sort(byName{fis})
+}
+
+// SortBySums sorts FileInfoSums content by sums.
+func (fis FileInfoSums) SortBySums() {
+ dups := fis.GetDuplicatePaths()
+ if len(dups) > 0 {
+ sort.Sort(bySum{fis, dups})
+ } else {
+ sort.Sort(bySum{fis, nil})
+ }
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }
+
+func (bn byName) Less(i, j int) bool {
+ if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+ return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+ }
+ return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}
+
+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+ FileInfoSums
+ dups FileInfoSums
+}
+
+func (bs bySum) Less(i, j int) bool {
+ if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+ return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+ }
+ return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}
+
+// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order
+type byPos struct{ FileInfoSums }
+
+func (bp byPos) Less(i, j int) bool {
+ return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
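+
+// Illustrative usage: duplicate paths are detected before sorting by sums,
+// so repeated names keep their archive order.
+//
+//	if dups := sums.GetDuplicatePaths(); len(dups) > 0 {
+//		// repeated names will be ordered by position when sorted
+//	}
+//	sums.SortBySums()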
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go
new file mode 100644
index 00000000000..bb700d8bdeb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums_test.go
@@ -0,0 +1,62 @@
+package tarsum
+
+import "testing"
+
+func newFileInfoSums() FileInfoSums {
+ return FileInfoSums{
+ fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2},
+ fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5},
+ fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0},
+ fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3},
+ fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4},
+ fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1},
+ }
+}
+
+func TestSortFileInfoSums(t *testing.T) {
+ dups := newFileInfoSums().GetAllFile("dup1")
+ if len(dups) != 2 {
+ t.Errorf("expected length 2, got %d", len(dups))
+ }
+ dups.SortByNames()
+ if dups[0].Pos() != 4 {
+ t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos())
+ }
+
+ fis := newFileInfoSums()
+ expected := "0abcdef1234567890"
+ fis.SortBySums()
+ got := fis[0].Sum()
+ if got != expected {
+ t.Errorf("Expected %q, got %q", expected, got)
+ }
+
+ fis = newFileInfoSums()
+ expected = "dup1"
+ fis.SortByNames()
+ gotFis := fis[0]
+ if gotFis.Name() != expected {
+ t.Errorf("Expected %q, got %q", expected, gotFis.Name())
+ }
+ // since a duplicate is first, ensure it is ordered first by position too
+ if gotFis.Pos() != 4 {
+ t.Errorf("Expected %d, got %d", 4, gotFis.Pos())
+ }
+
+ fis = newFileInfoSums()
+ fis.SortByPos()
+ if fis[0].Pos() != 0 {
+ t.Errorf("sorted fileInfoSums by Pos should order them by position.")
+ }
+
+ fis = newFileInfoSums()
+ expected = "deadbeef1"
+ gotFileInfoSum := fis.GetFile("dup1")
+ if gotFileInfoSum.Sum() != expected {
+ t.Errorf("Expected %q, got %q", expected, gotFileInfoSum)
+ }
+ if fis.GetFile("noPresent") != nil {
+ t.Errorf("Should have return nil if name not found.")
+ }
+
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
new file mode 100644
index 00000000000..154788db82e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
@@ -0,0 +1,295 @@
+// Package tarsum provides algorithms to perform checksum calculation on
+// filesystem layers.
+//
+// The transportation of filesystems, regarding Docker, is done with tar(1)
+// archives. There are a variety of tar serialization formats [2], and a key
+// concern here is ensuring a repeatable checksum given a set of inputs from a
+// generic tar archive. Types of transportation include distribution to and from a
+// registry endpoint, saving and loading through commands or Docker daemon APIs,
+// transferring the build context from client to Docker daemon, and committing the
+// filesystem of a container to become an image.
+//
+// As tar archives are used for transit, but not preserved in many situations, the
+// focus of the algorithm is to ensure the integrity of the preserved filesystem,
+// while maintaining a deterministic accountability. The algorithm neither
+// constrains the ordering or manipulation of the files during the creation
+// or unpacking of the archive, nor includes additional metadata state about
+// the file system attributes.
+package tarsum
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "path"
+ "strings"
+)
+
+const (
+ buf8K = 8 * 1024
+ buf16K = 16 * 1024
+ buf32K = 32 * 1024
+)
+
+// NewTarSum creates a new interface for calculating a fixed time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+ return NewTarSumHash(r, dc, v, DefaultTHash)
+}
+
+// NewTarSumHash creates a new TarSum, providing a THash to use rather than
+// the DefaultTHash.
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+ headerSelector, err := getTarHeaderSelector(v)
+ if err != nil {
+ return nil, err
+ }
+ ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+ err = ts.initTarSum()
+ return ts, err
+}
+
+// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
+ parts := strings.SplitN(label, "+", 2)
+ if len(parts) != 2 {
+ return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
+ }
+
+ versionName, hashName := parts[0], parts[1]
+
+ version, ok := tarSumVersionsByName[versionName]
+ if !ok {
+ return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
+ }
+
+ hashConfig, ok := standardHashConfigs[hashName]
+ if !ok {
+ return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
+ }
+
+ tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
+
+ return NewTarSumHash(r, disableCompression, version, tHash)
+}
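+
+// Illustrative usage: the label "tarsum.v1+sha256" selects Version1 with the
+// sha256 THash.
+//
+//	ts, err := NewTarSumForLabel(r, true, "tarsum.v1+sha256")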
+
+// TarSum is the generic interface for calculating fixed time
+// checksums of a tar archive.
+type TarSum interface {
+ io.Reader
+ GetSums() FileInfoSums
+ Sum([]byte) string
+ Version() Version
+ Hash() THash
+}
+
+// tarSum struct is the structure for a Version0 checksum calculation.
+type tarSum struct {
+ io.Reader
+ tarR *tar.Reader
+ tarW *tar.Writer
+ writer writeCloseFlusher
+ bufTar *bytes.Buffer
+ bufWriter *bytes.Buffer
+ bufData []byte
+ h hash.Hash
+ tHash THash
+ sums FileInfoSums
+ fileCounter int64
+ currentFile string
+ finished bool
+ first bool
+ DisableCompression bool // false by default. When false, the output is gzip compressed.
+ tarSumVersion Version // this field is not exported so it can not be mutated during use
+ headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive
+}
+
+func (ts tarSum) Hash() THash {
+ return ts.tHash
+}
+
+func (ts tarSum) Version() Version {
+ return ts.tarSumVersion
+}
+
+// THash provides a hash.Hash type generator and its name.
+type THash interface {
+ Hash() hash.Hash
+ Name() string
+}
+
+// NewTHash is a convenience method for creating a THash.
+func NewTHash(name string, h func() hash.Hash) THash {
+ return simpleTHash{n: name, h: h}
+}
+
+type tHashConfig struct {
+ name string
+ hash crypto.Hash
+}
+
+var (
+ // NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
+ standardHashConfigs = map[string]tHashConfig{
+ "sha256": {name: "sha256", hash: crypto.SHA256},
+ "sha512": {name: "sha512", hash: crypto.SHA512},
+ }
+)
+
+// DefaultTHash is the default TarSum hashing algorithm - "sha256".
+var DefaultTHash = NewTHash("sha256", sha256.New)
+
+type simpleTHash struct {
+ n string
+ h func() hash.Hash
+}
+
+func (sth simpleTHash) Name() string { return sth.n }
+func (sth simpleTHash) Hash() hash.Hash { return sth.h() }
+
+func (ts *tarSum) encodeHeader(h *tar.Header) error {
+ for _, elem := range ts.headerSelector.selectHeaders(h) {
+ if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ts *tarSum) initTarSum() error {
+ ts.bufTar = bytes.NewBuffer([]byte{})
+ ts.bufWriter = bytes.NewBuffer([]byte{})
+ ts.tarR = tar.NewReader(ts.Reader)
+ ts.tarW = tar.NewWriter(ts.bufTar)
+ if !ts.DisableCompression {
+ ts.writer = gzip.NewWriter(ts.bufWriter)
+ } else {
+ ts.writer = &nopCloseFlusher{Writer: ts.bufWriter}
+ }
+ if ts.tHash == nil {
+ ts.tHash = DefaultTHash
+ }
+ ts.h = ts.tHash.Hash()
+ ts.h.Reset()
+ ts.first = true
+ ts.sums = FileInfoSums{}
+ return nil
+}
+
+func (ts *tarSum) Read(buf []byte) (int, error) {
+ if ts.finished {
+ return ts.bufWriter.Read(buf)
+ }
+ if len(ts.bufData) < len(buf) {
+ switch {
+ case len(buf) <= buf8K:
+ ts.bufData = make([]byte, buf8K)
+ case len(buf) <= buf16K:
+ ts.bufData = make([]byte, buf16K)
+ case len(buf) <= buf32K:
+ ts.bufData = make([]byte, buf32K)
+ default:
+ ts.bufData = make([]byte, len(buf))
+ }
+ }
+ buf2 := ts.bufData[:len(buf)]
+
+ n, err := ts.tarR.Read(buf2)
+ if err != nil {
+ if err == io.EOF {
+ if _, err := ts.h.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ if !ts.first {
+ ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter})
+ ts.fileCounter++
+ ts.h.Reset()
+ } else {
+ ts.first = false
+ }
+
+ currentHeader, err := ts.tarR.Next()
+ if err != nil {
+ if err == io.EOF {
+ if err := ts.tarW.Close(); err != nil {
+ return 0, err
+ }
+ if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ if err := ts.writer.Close(); err != nil {
+ return 0, err
+ }
+ ts.finished = true
+ return n, nil
+ }
+ return n, err
+ }
+ ts.currentFile = path.Clean(currentHeader.Name)
+ if err := ts.encodeHeader(currentHeader); err != nil {
+ return 0, err
+ }
+ if err := ts.tarW.WriteHeader(currentHeader); err != nil {
+ return 0, err
+ }
+ if _, err := ts.tarW.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ ts.tarW.Flush()
+ if _, err := io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ ts.writer.Flush()
+
+ return ts.bufWriter.Read(buf)
+ }
+ return n, err
+ }
+
+ // Filling the hash buffer
+ if _, err = ts.h.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+
+ // Filling the tar writer
+ if _, err = ts.tarW.Write(buf2[:n]); err != nil {
+ return 0, err
+ }
+ ts.tarW.Flush()
+
+ // Filling the output writer
+ if _, err = io.Copy(ts.writer, ts.bufTar); err != nil {
+ return 0, err
+ }
+ ts.writer.Flush()
+
+ return ts.bufWriter.Read(buf)
+}
+
+func (ts *tarSum) Sum(extra []byte) string {
+ ts.sums.SortBySums()
+ h := ts.tHash.Hash()
+ if extra != nil {
+ h.Write(extra)
+ }
+ for _, fis := range ts.sums {
+ h.Write([]byte(fis.Sum()))
+ }
+ checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil))
+ return checksum
+}
+
+func (ts *tarSum) GetSums() FileInfoSums {
+ return ts.sums
+}
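+
+// Typical use (sketch, assuming a tar stream in r): the TarSum must be read
+// to completion before Sum is called, since sums are collected during Read.
+//
+//	ts, _ := NewTarSum(r, true, Version1)
+//	io.Copy(ioutil.Discard, ts)
+//	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:<hex digest>"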
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
new file mode 100644
index 00000000000..89b2e49f985
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_spec.md
@@ -0,0 +1,230 @@
+page_title: TarSum checksum specification
+page_description: Documentation for algorithms used in the TarSum checksum calculation
+page_keywords: docker, checksum, validation, tarsum
+
+# TarSum Checksum Specification
+
+## Abstract
+
+This document describes the algorithms used in performing the TarSum checksum
+calculation on filesystem layers, the need for this method over existing
+methods, and the versioning of this calculation.
+
+## Warning
+
+This checksum algorithm is for best-effort comparison of file trees with fuzzy logic.
+
+This is _not_ a cryptographic attestation, and should not be considered secure.
+
+## Introduction
+
+The transportation of filesystems, regarding Docker, is done with tar(1)
+archives. There are a variety of tar serialization formats [2], and a key
+concern here is ensuring a repeatable checksum given a set of inputs from a
+generic tar archive. Types of transportation include distribution to and from a
+registry endpoint, saving and loading through commands or Docker daemon APIs,
+transferring the build context from client to Docker daemon, and committing the
+filesystem of a container to become an image.
+
+As tar archives are used for transit, but not preserved in many situations, the
+focus of the algorithm is to ensure the integrity of the preserved filesystem,
+while maintaining a deterministic accountability. The algorithm neither
+constrains the ordering or manipulation of the files during the creation or
+unpacking of the archive, nor includes additional metadata state about the
+file system attributes.
+
+## Intended Audience
+
+This document is outlining the methods used for consistent checksum calculation
+for filesystems transported via tar archives.
+
+Auditing these methodologies is an open and iterative process. This document
+should accommodate the review of source code. Ultimately, this document should
+be the starting point of further refinements to the algorithm and its future
+versions.
+
+## Concept
+
+The checksum mechanism must ensure the integrity and assurance of the
+filesystem payload.
+
+## Checksum Algorithm Profile
+
+A checksum mechanism must define the following operations and attributes:
+
+* Associated hashing cipher - used to checksum each file payload and attribute
+ information.
+* Checksum list - each file of the filesystem archive has its checksum
+ calculated from the payload and attributes of the file. The final checksum is
+ calculated from this list, with specific ordering.
+* Version - as the algorithm adapts to requirements, there are behaviors of the
+ algorithm to manage by versioning.
+* Archive being calculated - the tar archive having its checksum calculated
+
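+Expressed in Go, this profile maps naturally onto an interface. The sketch
+below is illustrative only (the stand-in types are hypothetical); the
+package's own `TarSum` interface, which exposes these same methods, is the
+concrete realization:
+
+```
+package sketch
+
+// Stand-ins for the package's THash, FileInfoSums, and Version types.
+type (
+	THash        interface{ Name() string }
+	FileInfoSums []interface{}
+	Version      int
+)
+
+// checksumMechanism mirrors the profile above (hypothetical name).
+type checksumMechanism interface {
+	Hash() THash             // associated hashing cipher
+	GetSums() FileInfoSums   // checksum list, one entry per file
+	Version() Version        // version governing calculation behavior
+	Sum(extra []byte) string // final checksum over the archive
+}
+```
+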
+## Elements of TarSum checksum
+
+The calculated sum output is a text string. The elements included in the output
+of the calculated sum comprise the information needed for validation of the sum
+(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
+form.
+
+There are two delimiters used:
+* '+' separates TarSum version from hashing cipher
+* ':' separates calculation mechanics from expected hash
+
+Example:
+
+```
+ "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+ | | \ |
+ | | \ |
+ |_version_|_cipher__|__ |
+ | \ |
+ |_calculation_mechanics_|______________________expected_sum_______________________|
+```
+
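+As a minimal sketch (not part of the package), these two delimiters can be
+used to split a TarSum string back into its elements:
+
+```
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+func main() {
+	sum := "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
+
+	// ':' separates the calculation mechanics from the expected hash.
+	parts := strings.SplitN(sum, ":", 2)
+	// '+' separates the TarSum version from the hashing cipher.
+	mechanics := strings.SplitN(parts[0], "+", 2)
+
+	fmt.Println("version:", mechanics[0]) // tarsum.v1
+	fmt.Println("cipher:", mechanics[1])  // sha256
+	fmt.Println("sum:", parts[1])         // 220a60ec...
+}
+```
+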
+## Versioning
+
+Versioning was introduced [0] to accommodate differences in the calculation
+needed and the ability to maintain backward compatibility.
+
+The general algorithm is described further in the 'Calculation' section.
+
+### Version0
+
+This is the initial version of TarSum.
+
+Its element in the TarSum checksum string is `tarsum`.
+
+### Version1
+
+Its element in the TarSum checksum is `tarsum.v1`.
+
+The notable changes in this version:
+* Exclusion of file `mtime` from the file information headers, in each file
+ checksum calculation
+* Inclusion of extended attribute (`xattrs`, also seen as `SCHILY.xattr.`-prefixed
+  PAX tar file info headers) keys and values in each file checksum calculation
+
+### VersionDev
+
+*Do not use unless validating refinements to the checksum algorithm*
+
+Its element in the TarSum checksum is `tarsum.dev`.
+
+This is a floating placeholder for the next version and grounds for testing
+changes. The methods used for calculation are subject to change without
+notice, and this version is for testing and not for production use.
+
+## Ciphers
+
+The official default and standard hashing cipher used in the calculation
+mechanic is `sha256`. This refers to the SHA-256 hash algorithm as defined in
+FIPS 180-4.
+
+Though the TarSum algorithm itself is not exclusively bound to the single
+hashing cipher `sha256`, support for alternate hashing ciphers was later added
+[1]. Use cases for an alternate cipher could include future-proofing the
+TarSum checksum format and using faster hashing ciphers for tar filesystem
+checksums.
+
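+For illustration, the sketch below computes a TarSum over an uncompressed
+archive with `md5` in place of the default, using the package's `NewTHash`
+and `NewTarSumHash` helpers (the archive file name is hypothetical):
+
+```
+package main
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+
+	"github.com/docker/docker/pkg/tarsum"
+)
+
+func main() {
+	fh, err := os.Open("layer.tar") // hypothetical archive
+	if err != nil {
+		panic(err)
+	}
+	defer fh.Close()
+
+	// NewTHash pairs a label with a hash constructor; NewTarSumHash uses
+	// it in place of the default sha256.
+	ts, err := tarsum.NewTarSumHash(fh, true, tarsum.Version1, tarsum.NewTHash("md5", md5.New))
+	if err != nil {
+		panic(err)
+	}
+	// The archive must be read through the TarSum for the sums to be
+	// collected.
+	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
+		panic(err)
+	}
+	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+md5:..."
+}
+```
+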
+## Calculation
+
+### Requirement
+
+As mentioned earlier, the calculation takes into consideration the lifecycle
+of the tar archive: the tar archive is not an immutable, permanent artifact.
+Otherwise, options like relying on a known hashing cipher checksum of the
+archive itself would be reliable enough. The tar archive of the filesystem is
+used as a transportation medium for Docker images, and the archive is
+discarded once its contents are extracted. Therefore, for consistent
+validation, items such as the order of files in the tar archive and
+timestamps are subject to change once an image is received.
+
+### Process
+
+The method is typically iterative due to reading tar info headers from the
+archive stream, though this is not a strict requirement.
+
+#### Files
+
+Each file in the tar archive has its contents (headers and body) checksummed
+individually using the designated associated hashing cipher. The ordered
+headers of the file are written to the checksum calculation first, and then
+the payload of the file body.
+
+The resulting checksum of the file is appended to the list of file sums. The
+sum is encoded as a string of the hexadecimal digest. Additionally, the file
+name and position in the archive are kept as a reference for special ordering.
+
+#### Headers
+
+The following headers are read, in this order (with the corresponding
+representation of each value):
+* 'name' - string
+* 'mode' - string of the base10 integer
+* 'uid' - string of the integer
+* 'gid' - string of the integer
+* 'size' - string of the integer
+* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
+* 'typeflag' - string of the char
+* 'linkname' - string
+* 'uname' - string
+* 'gname' - string
+* 'devmajor' - string of the integer
+* 'devminor' - string of the integer
+
+For Version1 and later, the extended attribute headers ("SCHILY.xattr."
+prefixed PAX headers) are included after the above list. These xattr
+key/value pairs are first sorted by key.
+
+#### Header Format
+
+The ordered headers are written to the hash in the format of
+
+ "{.key}{.value}"
+
+with no newline.
+
+#### Body
+
+After the ordered headers of the file have been added to the checksum for the
+file, the body of the file is written to the hash.
+
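+Putting the 'Headers', 'Header Format', and 'Body' steps together, a minimal
+sketch of a single Version1 file checksum follows (the header values are
+hypothetical; the real implementation derives them from the tar info header):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+)
+
+func main() {
+	// Ordered headers for a hypothetical 4-byte regular file (mode 0644
+	// is base10 "420"; typeflag '0' is a regular file).
+	orderedHeaders := [][2]string{
+		{"name", "file.txt"},
+		{"mode", "420"},
+		{"uid", "0"},
+		{"gid", "0"},
+		{"size", "4"},
+		{"typeflag", "0"},
+		{"linkname", ""},
+		{"uname", ""},
+		{"gname", ""},
+		{"devmajor", "0"},
+		{"devminor", "0"},
+	}
+
+	h := sha256.New()
+	// Each header is written as "{.key}{.value}" with no newline.
+	for _, kv := range orderedHeaders {
+		h.Write([]byte(kv[0] + kv[1]))
+	}
+	// The file body is written to the same hash after the headers.
+	h.Write([]byte("test"))
+
+	fmt.Println(hex.EncodeToString(h.Sum(nil)))
+}
+```
+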
+#### List of file sums
+
+The list of file sums is sorted by the string of the hexadecimal digest.
+
+If there are two files in the tar with matching paths, the order of occurrence
+for that path is reflected for the sums of the corresponding file header and
+body.
+
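+A sketch of this ordering follows (the `fileSum` record is hypothetical; the
+package's `FileInfoSums.SortBySums` implements the equivalent behavior):
+
+```
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// fileSum is a hypothetical record of one file's hex digest and its
+// position of occurrence in the archive.
+type fileSum struct {
+	name string
+	pos  int
+	sum  string
+}
+
+func main() {
+	sums := []fileSum{
+		{"etc/hosts", 1, "bbbb"},
+		{"etc/hosts", 0, "cccc"},
+		{"bin/sh", 0, "aaaa"},
+	}
+	sort.Slice(sums, func(i, j int) bool {
+		// Matching paths keep their order of occurrence; everything
+		// else sorts by the hex digest string.
+		if sums[i].name == sums[j].name {
+			return sums[i].pos < sums[j].pos
+		}
+		return sums[i].sum < sums[j].sum
+	})
+	fmt.Println(sums) // [{bin/sh 0 aaaa} {etc/hosts 0 cccc} {etc/hosts 1 bbbb}]
+}
+```
+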
+#### Final Checksum
+
+Begin with a fresh or initial state of the associated hash cipher. If there is
+additional payload to include in the TarSum calculation for the archive, it is
+written first. Then each checksum from the ordered list of file sums is written
+to the hash.
+
+The resulting digest is formatted per the Elements of TarSum checksum,
+including the TarSum version, the associated hash cipher and the hexadecimal
+encoded checksum digest.
+
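+A sketch of this final assembly follows (the function and variable names are
+hypothetical; compare the package's `Sum` method):
+
+```
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+)
+
+// finalChecksum assembles the output string from the sorted file sums and
+// any additional payload (e.g. image JSON).
+func finalChecksum(version, cipher string, extra []byte, fileSums []string) string {
+	h := sha256.New() // fresh state of the associated hash cipher
+	if extra != nil {
+		h.Write(extra) // additional payload is written first
+	}
+	for _, s := range fileSums {
+		h.Write([]byte(s)) // then each sum from the ordered list
+	}
+	return version + "+" + cipher + ":" + hex.EncodeToString(h.Sum(nil))
+}
+
+func main() {
+	fmt.Println(finalChecksum("tarsum.v1", "sha256", nil, []string{"aaaa", "bbbb"}))
+}
+```
+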
+## Security Considerations
+
+The initial version of TarSum has undergone one update that could invalidate
+handcrafted tar archives. The tar archive format supports appending files
+with the same names as prior files in the archive. The latter file will
+clobber the prior file of the same path. Due to this, the algorithm now
+accounts for files with matching paths, and orders the list of file sums
+accordingly [3].
+
+## Footnotes
+
+* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
+* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
+* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
+* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
+
+## Acknowledgments
+
+Thanks to Joffrey F (shin-) and Guillaume J. Charmes (creack) for the initial
+work on the TarSum calculation.
+
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go
new file mode 100644
index 00000000000..54bec53fc97
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum_test.go
@@ -0,0 +1,656 @@
+package tarsum
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+type testLayer struct {
+ filename string
+ options *sizedOptions
+ jsonfile string
+ gzip bool
+ tarsum string
+ version Version
+ hash THash
+}
+
+var testLayers = []testLayer{
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ version: Version0,
+ tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"},
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ version: VersionDev,
+ tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"},
+ {
+ filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar",
+ jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json",
+ gzip: true,
+ tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"},
+ {
+ // Tests existing version of TarSum when xattrs are present
+ filename: "testdata/xattr/layer.tar",
+ jsonfile: "testdata/xattr/json",
+ version: Version0,
+ tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"},
+ {
+ // Tests next version of TarSum when xattrs are present
+ filename: "testdata/xattr/layer.tar",
+ jsonfile: "testdata/xattr/json",
+ version: VersionDev,
+ tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"},
+ {
+ filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar",
+ jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json",
+ tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"},
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"},
+ {
+ // this tar has two files with the same path
+ filename: "testdata/collision/collision-0.tar",
+ tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"},
+ {
+		// this tar has the same two files (with the same path), but in reversed order, ensuring it has a different hash than above
+ filename: "testdata/collision/collision-1.tar",
+ tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"},
+ {
+		// this tar has a newer version of the file in collision-0.tar, ensuring it has a different hash
+ filename: "testdata/collision/collision-2.tar",
+ tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"},
+ {
+		// this tar has a newer version of the file in collision-1.tar, ensuring it has a different hash
+ filename: "testdata/collision/collision-3.tar",
+ tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"},
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53",
+ hash: md5THash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df",
+ hash: sha1Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c",
+ hash: sha224Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636",
+ hash: sha384Hash,
+ },
+ {
+ options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory)
+ tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855",
+ hash: sha512Hash,
+ },
+}
+
+type sizedOptions struct {
+ num int64
+ size int64
+ isRand bool
+ realFile bool
+}
+
+// make a tar:
+// * num is the number of files the tar should have
+// * size is the bytes per file
+// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros)
+// * realFile will write to a TempFile, instead of an in-memory buffer
+func sizedTar(opts sizedOptions) io.Reader {
+ var (
+ fh io.ReadWriter
+ err error
+ )
+ if opts.realFile {
+ fh, err = ioutil.TempFile("", "tarsum")
+ if err != nil {
+ return nil
+ }
+ } else {
+ fh = bytes.NewBuffer([]byte{})
+ }
+ tarW := tar.NewWriter(fh)
+ defer tarW.Close()
+ for i := int64(0); i < opts.num; i++ {
+ err := tarW.WriteHeader(&tar.Header{
+ Name: fmt.Sprintf("/testdata%d", i),
+ Mode: 0755,
+ Uid: 0,
+ Gid: 0,
+ Size: opts.size,
+ })
+ if err != nil {
+ return nil
+ }
+ var rBuf []byte
+ if opts.isRand {
+ rBuf = make([]byte, 8)
+ _, err = rand.Read(rBuf)
+ if err != nil {
+ return nil
+ }
+ } else {
+ rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}
+ }
+
+ for i := int64(0); i < opts.size/int64(8); i++ {
+ tarW.Write(rBuf)
+ }
+ }
+ return fh
+}
+
+func emptyTarSum(gzip bool) (TarSum, error) {
+ reader, writer := io.Pipe()
+ tarWriter := tar.NewWriter(writer)
+
+ // Immediately close tarWriter and write-end of the
+ // Pipe in a separate goroutine so we don't block.
+ go func() {
+ tarWriter.Close()
+ writer.Close()
+ }()
+
+ return NewTarSum(reader, !gzip, Version0)
+}
+
+// Test errors on NewTarsumForLabel
+func TestNewTarSumForLabelInvalid(t *testing.T) {
+ reader := strings.NewReader("")
+
+ if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil {
+ t.Fatalf("Expected an error, got nothing.")
+ }
+
+ if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil {
+ t.Fatalf("Expected an error, got nothing.")
+ }
+ if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil {
+ t.Fatalf("Expected an error, got nothing.")
+ }
+}
+
+func TestNewTarSumForLabel(t *testing.T) {
+
+ layer := testLayers[0]
+
+ reader, err := os.Open(layer.filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ label := strings.Split(layer.tarsum, ":")[0]
+ ts, err := NewTarSumForLabel(reader, false, label)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure it actually worked by reading a little bit of it
+ nbByteToRead := 8 * 1024
+ dBuf := make([]byte, nbByteToRead)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err)
+ }
+}
+
+// TestEmptyTar tests that tarsum does not fail to read an empty tar
+// and correctly returns the hex digest of an empty hash.
+func TestEmptyTar(t *testing.T) {
+ // Test without gzip.
+ ts, err := emptyTarSum(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ zeroBlock := make([]byte, 1024)
+ buf := new(bytes.Buffer)
+
+ n, err := io.Copy(buf, ts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) {
+ t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n)
+ }
+
+ expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil))
+ resultSum := ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+
+ // Test with gzip.
+ ts, err = emptyTarSum(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ buf.Reset()
+
+ n, err = io.Copy(buf, ts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bufgz := new(bytes.Buffer)
+ gz := gzip.NewWriter(bufgz)
+ n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock))
+ gz.Close()
+ gzBytes := bufgz.Bytes()
+
+ if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) {
+ t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n)
+ }
+
+ resultSum = ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+
+ // Test without ever actually writing anything.
+ if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil {
+ t.Fatal(err)
+ }
+
+ resultSum = ts.Sum(nil)
+
+ if resultSum != expectedSum {
+ t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
+ }
+}
+
+var (
+ md5THash = NewTHash("md5", md5.New)
+ sha1Hash = NewTHash("sha1", sha1.New)
+ sha224Hash = NewTHash("sha224", sha256.New224)
+ sha384Hash = NewTHash("sha384", sha512.New384)
+ sha512Hash = NewTHash("sha512", sha512.New)
+)
+
+// Test all the built-in read sizes: buf8K, buf16K, buf32K, and more
+func TestTarSumsReadSize(t *testing.T) {
+ // Test always on the same layer (that is big enough)
+ layer := testLayers[0]
+
+ for i := 0; i < 5; i++ {
+
+ reader, err := os.Open(layer.filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ ts, err := NewTarSum(reader, false, layer.version)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Read and discard bytes so that it populates sums
+ nbByteToRead := (i + 1) * 8 * 1024
+ dBuf := make([]byte, nbByteToRead)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err)
+ continue
+ }
+ }
+}
+
+func TestTarSums(t *testing.T) {
+ for _, layer := range testLayers {
+ var (
+ fh io.Reader
+ err error
+ )
+ if len(layer.filename) > 0 {
+ fh, err = os.Open(layer.filename)
+ if err != nil {
+ t.Errorf("failed to open %s: %s", layer.filename, err)
+ continue
+ }
+ } else if layer.options != nil {
+ fh = sizedTar(*layer.options)
+ } else {
+ // What else is there to test?
+ t.Errorf("what to do with %#v", layer)
+ continue
+ }
+ if file, ok := fh.(*os.File); ok {
+ defer file.Close()
+ }
+
+ var ts TarSum
+ if layer.hash == nil {
+ // double negatives!
+ ts, err = NewTarSum(fh, !layer.gzip, layer.version)
+ } else {
+ ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash)
+ }
+ if err != nil {
+ t.Errorf("%q :: %q", err, layer.filename)
+ continue
+ }
+
+		// Read a variable number of bytes to test the dynamic buffer
+ dBuf := make([]byte, 1)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read 1B from %s: %s", layer.filename, err)
+ continue
+ }
+ dBuf = make([]byte, 16*1024)
+ _, err = ts.Read(dBuf)
+ if err != nil {
+ t.Errorf("failed to read 16KB from %s: %s", layer.filename, err)
+ continue
+ }
+
+ // Read and discard remaining bytes
+ _, err = io.Copy(ioutil.Discard, ts)
+ if err != nil {
+ t.Errorf("failed to copy from %s: %s", layer.filename, err)
+ continue
+ }
+ var gotSum string
+ if len(layer.jsonfile) > 0 {
+ jfh, err := os.Open(layer.jsonfile)
+ if err != nil {
+ t.Errorf("failed to open %s: %s", layer.jsonfile, err)
+ continue
+ }
+ buf, err := ioutil.ReadAll(jfh)
+ if err != nil {
+ t.Errorf("failed to readAll %s: %s", layer.jsonfile, err)
+ continue
+ }
+ gotSum = ts.Sum(buf)
+ } else {
+ gotSum = ts.Sum(nil)
+ }
+
+ if layer.tarsum != gotSum {
+ t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum)
+ }
+ var expectedHashName string
+ if layer.hash != nil {
+ expectedHashName = layer.hash.Name()
+ } else {
+ expectedHashName = DefaultTHash.Name()
+ }
+ if expectedHashName != ts.Hash().Name() {
+ t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name())
+ }
+ }
+}
+
+func TestIteration(t *testing.T) {
+ headerTests := []struct {
+ expectedSum string // TODO(vbatts) it would be nice to get individual sums of each
+ version Version
+ hdr *tar.Header
+ data []byte
+ }{
+ {
+ "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd",
+ Version0,
+ &tar.Header{
+ Name: "file.txt",
+ Size: 0,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte(""),
+ },
+ {
+ "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465",
+ VersionDev,
+ &tar.Header{
+ Name: "file.txt",
+ Size: 0,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte(""),
+ },
+ {
+ "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef",
+ VersionDev,
+ &tar.Header{
+ Name: "another.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Devminor: 0,
+ Devmajor: 0,
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd",
+ VersionDev,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.key1": "value1",
+ "user.key2": "value2",
+ },
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760",
+ VersionDev,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.KEY1": "value1", // adding different case to ensure different sum
+ "user.key2": "value2",
+ },
+ },
+ []byte("test"),
+ },
+ {
+ "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa",
+ Version0,
+ &tar.Header{
+ Name: "xattrs.txt",
+ Uid: 1000,
+ Gid: 1000,
+ Uname: "slartibartfast",
+ Gname: "users",
+ Size: 4,
+ Typeflag: tar.TypeReg,
+ Xattrs: map[string]string{
+ "user.NOT": "CALCULATED",
+ },
+ },
+ []byte("test"),
+ },
+ }
+ for _, htest := range headerTests {
+ s, err := renderSumForHeader(htest.version, htest.hdr, htest.data)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if s != htest.expectedSum {
+ t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s)
+ }
+ }
+
+}
+
+func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) {
+ buf := bytes.NewBuffer(nil)
+ // first build our test tar
+ tw := tar.NewWriter(buf)
+ if err := tw.WriteHeader(h); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(data); err != nil {
+ return "", err
+ }
+ tw.Close()
+
+ ts, err := NewTarSum(buf, true, v)
+ if err != nil {
+ return "", err
+ }
+ tr := tar.NewReader(ts)
+ for {
+ hdr, err := tr.Next()
+ if hdr == nil || err == io.EOF {
+ // Signals the end of the archive.
+ break
+ }
+ if err != nil {
+ return "", err
+ }
+ if _, err = io.Copy(ioutil.Discard, tr); err != nil {
+ return "", err
+ }
+ }
+ return ts.Sum(nil), nil
+}
+
+func Benchmark9kTar(b *testing.B) {
+ buf := bytes.NewBuffer([]byte{})
+ fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ n, err := io.Copy(buf, fh)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ fh.Close()
+
+ reader := bytes.NewReader(buf.Bytes())
+
+ b.SetBytes(n)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reader.Seek(0, 0)
+ ts, err := NewTarSum(reader, true, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ }
+}
+
+func Benchmark9kTarGzip(b *testing.B) {
+ buf := bytes.NewBuffer([]byte{})
+ fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar")
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ n, err := io.Copy(buf, fh)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ fh.Close()
+
+ reader := bytes.NewReader(buf.Bytes())
+
+ b.SetBytes(n)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ reader.Seek(0, 0)
+ ts, err := NewTarSum(reader, false, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ }
+}
+
+// this is a single big file in the tar archive
+func Benchmark1mbSingleFileTar(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false)
+}
+
+// this is a single big file in the tar archive
+func Benchmark1mbSingleFileTarGzip(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true)
+}
+
+// this is 1024 1k files in the tar archive
+func Benchmark1kFilesTar(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false)
+}
+
+// this is 1024 1k files in the tar archive
+func Benchmark1kFilesTarGzip(b *testing.B) {
+ benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true)
+}
+
+func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) {
+ var fh *os.File
+ tarReader := sizedTar(opts)
+ if br, ok := tarReader.(*os.File); ok {
+ fh = br
+ }
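+	// Callers above always pass realFile=true, so sizedTar returns an
+	// *os.File here; with an in-memory buffer fh would be nil and the
+	// deferred calls below would panic.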
+ defer os.Remove(fh.Name())
+ defer fh.Close()
+
+ b.SetBytes(opts.size * opts.num)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ts, err := NewTarSum(fh, !isGzip, Version0)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ io.Copy(ioutil.Discard, ts)
+ ts.Sum(nil)
+ fh.Seek(0, 0)
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
new file mode 100644
index 00000000000..48e2af349cd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
@@ -0,0 +1 @@
+{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' /etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425}
\ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
new file mode 100644
index 00000000000..dfd5c204aea
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json b/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
new file mode 100644
index 00000000000..af57be01ff1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
@@ -0,0 +1 @@
+{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}
\ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
new file mode 100644
index 00000000000..880b3f2c56a
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar
new file mode 100644
index 00000000000..1c636b3bc76
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-0.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar
new file mode 100644
index 00000000000..b411be97857
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-1.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar
new file mode 100644
index 00000000000..7b5c04a9644
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-2.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar
new file mode 100644
index 00000000000..f8c64586d2d
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/collision/collision-3.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/json b/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/json
new file mode 100644
index 00000000000..288441a9405
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/json
@@ -0,0 +1 @@
+{"id":"4439c3c7f847954100b42b267e7e5529cac1d6934db082f65795c5ca2e594d93","parent":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","created":"2014-05-16T17:19:44.091534414Z","container":"5f92fb06cc58f357f0cde41394e2bbbb664e663974b2ac1693ab07b7a306749b","container_config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","setcap 'cap_setgid,cap_setuid+ep' ./file \u0026\u0026 getcap ./file"],"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.11.1-dev","config":{"Hostname":"9565c6517a0e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"73b164f4437db87e96e90083c73a6592f549646ae2ec00ed33c6b9b49a5c4470","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":0}
\ No newline at end of file
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar b/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar
new file mode 100644
index 00000000000..819351d42f4
Binary files /dev/null and b/vendor/github.com/docker/docker/pkg/tarsum/testdata/xattr/layer.tar differ
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go
new file mode 100644
index 00000000000..28822868541
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go
@@ -0,0 +1,150 @@
+package tarsum
+
+import (
+ "archive/tar"
+ "errors"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Version is used for versioning of the TarSum algorithm,
+// based on the prefix of the hash used,
+// e.g. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"
+type Version int
+
+// Prefix of "tarsum"
+const (
+ Version0 Version = iota
+ Version1
+	// VersionDev is either the latest or an unsettled next version of the TarSum calculation
+ VersionDev
+)
+
+// VersionLabelForChecksum returns the label for the given tarsum
+// checksum, i.e., everything before the first `+` character in
+// the string or an empty string if no label separator is found.
+func VersionLabelForChecksum(checksum string) string {
+ // Checksums are in the form: {versionLabel}+{hashID}:{hex}
+ sepIndex := strings.Index(checksum, "+")
+ if sepIndex < 0 {
+ return ""
+ }
+ return checksum[:sepIndex]
+}
+
+// GetVersions gets a list of all known tarsum versions.
+func GetVersions() []Version {
+ v := []Version{}
+ for k := range tarSumVersions {
+ v = append(v, k)
+ }
+ return v
+}
+
+var (
+ tarSumVersions = map[Version]string{
+ Version0: "tarsum",
+ Version1: "tarsum.v1",
+ VersionDev: "tarsum.dev",
+ }
+ tarSumVersionsByName = map[string]Version{
+ "tarsum": Version0,
+ "tarsum.v1": Version1,
+ "tarsum.dev": VersionDev,
+ }
+)
+
+func (tsv Version) String() string {
+ return tarSumVersions[tsv]
+}
+
+// GetVersionFromTarsum returns the Version from the provided string.
+func GetVersionFromTarsum(tarsum string) (Version, error) {
+ tsv := tarsum
+ if strings.Contains(tarsum, "+") {
+ tsv = strings.SplitN(tarsum, "+", 2)[0]
+ }
+ for v, s := range tarSumVersions {
+ if s == tsv {
+ return v, nil
+ }
+ }
+ return -1, ErrNotVersion
+}
+
+// Errors that may be returned by functions in this package
+var (
+ ErrNotVersion = errors.New("string does not include a TarSum Version")
+ ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented")
+)
+
+// tarHeaderSelector is the interface which different versions
+// of tarsum should use for selecting and ordering tar headers
+// for each item in the archive.
+type tarHeaderSelector interface {
+ selectHeaders(h *tar.Header) (orderedHeaders [][2]string)
+}
+
+type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string)
+
+func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) {
+ return f(h)
+}
+
+func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+ return [][2]string{
+ {"name", h.Name},
+ {"mode", strconv.FormatInt(h.Mode, 10)},
+ {"uid", strconv.Itoa(h.Uid)},
+ {"gid", strconv.Itoa(h.Gid)},
+ {"size", strconv.FormatInt(h.Size, 10)},
+ {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
+ {"typeflag", string([]byte{h.Typeflag})},
+ {"linkname", h.Linkname},
+ {"uname", h.Uname},
+ {"gname", h.Gname},
+ {"devmajor", strconv.FormatInt(h.Devmajor, 10)},
+ {"devminor", strconv.FormatInt(h.Devminor, 10)},
+ }
+}
+
+func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
+ // Get extended attributes.
+	xAttrKeys := make([]string, 0, len(h.Xattrs))
+ for k := range h.Xattrs {
+ xAttrKeys = append(xAttrKeys, k)
+ }
+ sort.Strings(xAttrKeys)
+
+ // Make the slice with enough capacity to hold the 11 basic headers
+ // we want from the v0 selector plus however many xattrs we have.
+ orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
+
+	// Copy all headers from v0 excluding the 'mtime' header (at index 5).
+ v0headers := v0TarHeaderSelect(h)
+ orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
+ orderedHeaders = append(orderedHeaders, v0headers[6:]...)
+
+ // Finally, append the sorted xattrs.
+ for _, k := range xAttrKeys {
+ orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
+ }
+
+ return
+}
+
+var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
+ Version0: v0TarHeaderSelect,
+ Version1: v1TarHeaderSelect,
+ VersionDev: v1TarHeaderSelect,
+}
+
+func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
+ headerSelector, ok := registeredHeaderSelectors[v]
+ if !ok {
+ return nil, ErrVersionNotImplemented
+ }
+
+ return headerSelector, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go
new file mode 100644
index 00000000000..88e0a5783ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning_test.go
@@ -0,0 +1,98 @@
+package tarsum
+
+import (
+ "testing"
+)
+
+func TestVersionLabelForChecksum(t *testing.T) {
+ version := VersionLabelForChecksum("tarsum+sha256:deadbeef")
+ if version != "tarsum" {
+ t.Fatalf("Version should have been 'tarsum', was %v", version)
+ }
+ version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef")
+ if version != "tarsum.v1" {
+ t.Fatalf("Version should have been 'tarsum.v1', was %v", version)
+ }
+ version = VersionLabelForChecksum("something+somethingelse")
+ if version != "something" {
+ t.Fatalf("Version should have been 'something', was %v", version)
+ }
+ version = VersionLabelForChecksum("invalidChecksum")
+ if version != "" {
+ t.Fatalf("Version should have been empty, was %v", version)
+ }
+}
+
+func TestVersion(t *testing.T) {
+ expected := "tarsum"
+ var v Version
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+
+ expected = "tarsum.v1"
+ v = 1
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+
+ expected = "tarsum.dev"
+ v = 2
+ if v.String() != expected {
+ t.Errorf("expected %q, got %q", expected, v.String())
+ }
+}
+
+func TestGetVersion(t *testing.T) {
+ testSet := []struct {
+ Str string
+ Expected Version
+ }{
+ {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0},
+ {"tarsum+sha256", Version0},
+ {"tarsum", Version0},
+ {"tarsum.dev", VersionDev},
+ {"tarsum.dev+sha256:deadbeef", VersionDev},
+ }
+
+ for _, ts := range testSet {
+ v, err := GetVersionFromTarsum(ts.Str)
+ if err != nil {
+ t.Fatalf("%q : %s", err, ts.Str)
+ }
+ if v != ts.Expected {
+ t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v)
+ }
+ }
+
+ // test one that does not exist, to ensure it errors
+ str := "weak+md5:abcdeabcde"
+ _, err := GetVersionFromTarsum(str)
+ if err != ErrNotVersion {
+ t.Fatalf("%q : %s", err, str)
+ }
+}
+
+func TestGetVersions(t *testing.T) {
+ expected := []Version{
+ Version0,
+ Version1,
+ VersionDev,
+ }
+ versions := GetVersions()
+ if len(versions) != len(expected) {
+ t.Fatalf("Expected %v versions, got %v", len(expected), len(versions))
+ }
+ if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) {
+ t.Fatalf("Expected [%v], got [%v]", expected, versions)
+ }
+}
+
+func containsVersion(versions []Version, version Version) bool {
+ for _, v := range versions {
+ if v == version {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
new file mode 100644
index 00000000000..9727ecde3eb
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
@@ -0,0 +1,22 @@
+package tarsum
+
+import (
+ "io"
+)
+
+type writeCloseFlusher interface {
+ io.WriteCloser
+ Flush() error
+}
+
+type nopCloseFlusher struct {
+ io.Writer
+}
+
+func (n *nopCloseFlusher) Close() error {
+ return nil
+}
+
+func (n *nopCloseFlusher) Flush() error {
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
new file mode 100644
index 00000000000..f5262bccf5f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ASCII is the list of possible supported ASCII key sequences
+var ASCII = []string{
+ "ctrl-@",
+ "ctrl-a",
+ "ctrl-b",
+ "ctrl-c",
+ "ctrl-d",
+ "ctrl-e",
+ "ctrl-f",
+ "ctrl-g",
+ "ctrl-h",
+ "ctrl-i",
+ "ctrl-j",
+ "ctrl-k",
+ "ctrl-l",
+ "ctrl-m",
+ "ctrl-n",
+ "ctrl-o",
+ "ctrl-p",
+ "ctrl-q",
+ "ctrl-r",
+ "ctrl-s",
+ "ctrl-t",
+ "ctrl-u",
+ "ctrl-v",
+ "ctrl-w",
+ "ctrl-x",
+ "ctrl-y",
+ "ctrl-z",
+ "ctrl-[",
+ "ctrl-\\",
+ "ctrl-]",
+ "ctrl-^",
+ "ctrl-_",
+}
+
+// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+ codes := []byte{}
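+	// The "next" label lets the ASCII table lookup below skip to the
+	// following key once a match is found.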
+next:
+ for _, key := range strings.Split(keys, ",") {
+ if len(key) != 1 {
+ for code, ctrl := range ASCII {
+ if ctrl == key {
+ codes = append(codes, byte(code))
+ continue next
+ }
+ }
+ if key == "DEL" {
+ codes = append(codes, 127)
+ } else {
+ return nil, fmt.Errorf("Unknown character: '%s'", key)
+ }
+ } else {
+ codes = append(codes, byte(key[0]))
+ }
+ }
+ return codes, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii_test.go b/vendor/github.com/docker/docker/pkg/term/ascii_test.go
new file mode 100644
index 00000000000..4a1e7f302c1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii_test.go
@@ -0,0 +1,43 @@
+package term
+
+import "testing"
+
+func TestToBytes(t *testing.T) {
+ codes, err := ToBytes("ctrl-a,a")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(codes) != 2 {
+ t.Fatalf("Expected 2 codes, got %d", len(codes))
+ }
+ if codes[0] != 1 || codes[1] != 97 {
+ t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1])
+ }
+
+ codes, err = ToBytes("shift-z")
+ if err == nil {
+ t.Fatalf("Expected error, got none")
+ }
+
+ codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(codes) != 4 {
+ t.Fatalf("Expected 4 codes, got %d", len(codes))
+ }
+ if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 {
+ t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3])
+ }
+
+ codes, err = ToBytes("DEL,+")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(codes) != 2 {
+ t.Fatalf("Expected 2 codes, got %d", len(codes))
+ }
+ if codes[0] != 127 || codes[1] != 43 {
+ t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1])
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
new file mode 100644
index 00000000000..59dac5ba8e4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_linux_cgo.go
@@ -0,0 +1,50 @@
+// +build linux,cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a passthrough for syscall.Termios in order to make it portable to
+// other platforms where it is not available or is handled differently.
+type Termios syscall.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+
+ C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
+ if err := tcset(fd, &newState); err != 0 {
+ return nil, err
+ }
+ return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_other.go b/vendor/github.com/docker/docker/pkg/term/tc_other.go
new file mode 100644
index 00000000000..750d7c3f607
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_other.go
@@ -0,0 +1,20 @@
+// +build !windows
+// +build !linux !cgo
+// +build !solaris !cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+ return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
new file mode 100644
index 00000000000..c9139d0ca80
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc_solaris_cgo.go
@@ -0,0 +1,63 @@
+// +build solaris,cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// #include <termios.h>
+import "C"
+
+// Termios is the Unix API for terminal I/O.
+// It is a passthrough for syscall.Termios in order to make it portable to
+// other platforms where it is not available or is handled differently.
+type Termios syscall.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+
+ newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY)
+ newState.Oflag &^= syscall.OPOST
+ newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
+ newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
+ newState.Cflag |= syscall.CS8
+
+	/*
+		VMIN is the minimum number of characters that need to be read in
+		non-canonical mode for the read to return. Since VMIN is overloaded
+		with another element in canonical mode, it defaults to 4 when we
+		switch modes; it needs to be explicitly set to 1.
+	*/
+ newState.Cc[C.VMIN] = 1
+ newState.Cc[C.VTIME] = 0
+
+ if err := tcset(fd, &newState); err != 0 {
+ return nil, err
+ }
+ return &oldState, nil
+}
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+ ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
+ if ret != 0 {
+ return err.(syscall.Errno)
+ }
+ return 0
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
new file mode 100644
index 00000000000..1609a900a96
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -0,0 +1,117 @@
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// terminal (state, sizes).
+package term
+
+import (
+ "errors"
+ "io"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+var (
+ // ErrInvalidState is returned if the state of the terminal is invalid.
+ ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+// State represents the state of the terminal.
+type State struct {
+ termios Termios
+}
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+ Height uint16
+ Width uint16
+ x uint16
+ y uint16
+}
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ return os.Stdin, os.Stdout, os.Stderr
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ var inFd uintptr
+ var isTerminalIn bool
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminalIn = IsTerminal(inFd)
+ }
+ return inFd, isTerminalIn
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ var termios Termios
+ return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ if state == nil {
+ return ErrInvalidState
+ }
+ if err := tcset(fd, &state.termios); err != 0 {
+ return err
+ }
+ return nil
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ var oldState State
+ if err := tcget(fd, &oldState.termios); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// DisableEcho applies the specified state to the terminal connected to the file
+// descriptor, with echo disabled.
+func DisableEcho(fd uintptr, state *State) error {
+ newState := state.termios
+ newState.Lflag &^= syscall.ECHO
+
+ if err := tcset(fd, &newState); err != 0 {
+ return err
+ }
+ handleInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ oldState, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+ handleInterrupt(fd, oldState)
+ return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+
+ go func() {
+		<-sigchan
+ RestoreTerminal(fd, state)
+ }()
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_solaris.go b/vendor/github.com/docker/docker/pkg/term/term_solaris.go
new file mode 100644
index 00000000000..112debbec56
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_solaris.go
@@ -0,0 +1,41 @@
+// +build solaris
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+/*
+#include <unistd.h>
+#include <stropts.h>
+#include <termios.h>
+
+// Small wrapper to get rid of variadic args of ioctl()
+int my_ioctl(int fd, int cmd, struct winsize *ws) {
+ return ioctl(fd, cmd, ws);
+}
+*/
+import "C"
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
+ // Skip retval = 0
+ if ret == 0 {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_unix.go b/vendor/github.com/docker/docker/pkg/term/term_unix.go
new file mode 100644
index 00000000000..ddf87a0e58a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_unix.go
@@ -0,0 +1,29 @@
+// +build !solaris,!windows
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ ws := &Winsize{}
+ _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+ if err == 0 {
+ return ws, nil
+ }
+ return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+ _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
+	// Skip errno = 0
+ if err == 0 {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
new file mode 100644
index 00000000000..dc50da4577a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -0,0 +1,232 @@
+// +build windows
+
+package term
+
+import (
+ "io"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/Azure/go-ansiterm/winterm"
+ "github.com/docker/docker/pkg/term/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+ mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+ Height uint16
+ Width uint16
+}
+
+const (
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
+ enableVirtualTerminalInput = 0x0200
+ enableVirtualTerminalProcessing = 0x0004
+ disableNewlineAutoReturn = 0x0008
+)
+
+// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+ // Turn on VT handling on all std handles, if possible. This might
+ // fail, in which case we will fall back to terminal emulation.
+ var emulateStdin, emulateStdout, emulateStderr bool
+ fd := os.Stdin.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate that enableVirtualTerminalInput is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+ emulateStdin = true
+ } else {
+ vtInputSupported = true
+ }
+ // Unconditionally set the console mode back even on failure because SetConsoleMode
+ // remembers invalid bits on input handles.
+ winterm.SetConsoleMode(fd, mode)
+ }
+
+ fd = os.Stdout.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStdout = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ fd = os.Stderr.Fd()
+ if mode, err := winterm.GetConsoleMode(fd); err == nil {
+ // Validate disableNewlineAutoReturn is supported, but do not set it.
+ if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+ emulateStderr = true
+ } else {
+ winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+ }
+ }
+
+ if os.Getenv("ConEmuANSI") == "ON" {
+ // The ConEmu terminal emulates ANSI on output streams well.
+ emulateStdout = false
+ emulateStderr = false
+ }
+
+ if emulateStdin {
+ stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE)
+ } else {
+ stdIn = os.Stdin
+ }
+
+ if emulateStdout {
+ stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
+ } else {
+ stdOut = os.Stdout
+ }
+
+ if emulateStderr {
+ stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
+ } else {
+ stdErr = os.Stderr
+ }
+
+ return
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+ return windows.GetHandleInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ winsize := &Winsize{
+ Width: uint16(info.Window.Right - info.Window.Left + 1),
+ Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+ }
+
+ return winsize, nil
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+ return windows.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+ return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+ mode, e := winterm.GetConsoleMode(fd)
+ if e != nil {
+ return nil, e
+ }
+
+ return &State{mode: mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+ mode := state.mode
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
+ err := winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+ state, err := MakeRaw(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Register an interrupt handler to catch and restore prior state
+ restoreAtInterrupt(fd, state)
+ return state, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ignore failures, since disableNewlineAutoReturn might not be supported on this
+ // version of Windows.
+ winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+ return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ state, err := SaveState(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ mode := state.mode
+
+ // See
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+ // Disable these modes
+ mode &^= winterm.ENABLE_ECHO_INPUT
+ mode &^= winterm.ENABLE_LINE_INPUT
+ mode &^= winterm.ENABLE_MOUSE_INPUT
+ mode &^= winterm.ENABLE_WINDOW_INPUT
+ mode &^= winterm.ENABLE_PROCESSED_INPUT
+
+ // Enable these modes
+ mode |= winterm.ENABLE_EXTENDED_FLAGS
+ mode |= winterm.ENABLE_INSERT_MODE
+ mode |= winterm.ENABLE_QUICK_EDIT_MODE
+ if vtInputSupported {
+ mode |= enableVirtualTerminalInput
+ }
+
+ err = winterm.SetConsoleMode(fd, mode)
+ if err != nil {
+ return nil, err
+ }
+ return state, nil
+}
+
+func restoreAtInterrupt(fd uintptr, state *State) {
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, os.Interrupt)
+
+ go func() {
+ <-sigchan
+ RestoreTerminal(fd, state)
+ os.Exit(0)
+ }()
+}
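For orientation, here is a minimal sketch of how a caller might drive this Windows term API; the call site is an illustrative assumption, not code from this patch, but GetFdInfo, SetRawTerminal, and RestoreTerminal are the functions defined above.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		os.Exit(1)
	}

	// Enter raw mode; SetRawTerminal also registers the interrupt
	// handler above, so Ctrl-C restores the console before exiting.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer term.RestoreTerminal(fd, state)

	// ... interactive I/O against the raw console goes here ...
}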
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_darwin.go b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go
new file mode 100644
index 00000000000..480db900ac9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_darwin.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, pass-throughs of the values defined in syscall.
+const (
+ IGNBRK = syscall.IGNBRK
+ PARMRK = syscall.PARMRK
+ INLCR = syscall.INLCR
+ IGNCR = syscall.IGNCR
+ ECHONL = syscall.ECHONL
+ CSIZE = syscall.CSIZE
+ ICRNL = syscall.ICRNL
+ ISTRIP = syscall.ISTRIP
+ PARENB = syscall.PARENB
+ ECHO = syscall.ECHO
+ ICANON = syscall.ICANON
+ ISIG = syscall.ISIG
+ IXON = syscall.IXON
+ BRKINT = syscall.BRKINT
+ INPCK = syscall.INPCK
+ OPOST = syscall.OPOST
+ CS8 = syscall.CS8
+ IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+ Iflag uint64
+ Oflag uint64
+ Cflag uint64
+ Lflag uint64
+ Cc [20]byte
+ Ispeed uint64
+ Ospeed uint64
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+ newState.Oflag &^= OPOST
+ newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+ newState.Cflag &^= (CSIZE | PARENB)
+ newState.Cflag |= CS8
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
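A hedged sketch of the Unix-side API in use: MakeRaw saves the old termios, clears the canonical/echo/signal flags listed above, and the saved State is later handed back to the package's RestoreTerminal. The read loop is illustrative only.

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Restore the saved termios (TIOCSETA/TCSETS) on exit.
	defer term.RestoreTerminal(fd, oldState)

	// With ICANON and ECHO cleared and VMIN=1, Read returns after a
	// single keystroke instead of a full line. OPOST is cleared too,
	// so output needs explicit \r\n.
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err == nil {
		fmt.Printf("\r\nread byte %#x\r\n", buf[0])
	}
}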
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
new file mode 100644
index 00000000000..ed843ad69c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_freebsd.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, pass-throughs of the values defined in syscall.
+const (
+ IGNBRK = syscall.IGNBRK
+ PARMRK = syscall.PARMRK
+ INLCR = syscall.INLCR
+ IGNCR = syscall.IGNCR
+ ECHONL = syscall.ECHONL
+ CSIZE = syscall.CSIZE
+ ICRNL = syscall.ICRNL
+ ISTRIP = syscall.ISTRIP
+ PARENB = syscall.PARENB
+ ECHO = syscall.ECHO
+ ICANON = syscall.ICANON
+ ISIG = syscall.ISIG
+ IXON = syscall.IXON
+ BRKINT = syscall.BRKINT
+ INPCK = syscall.INPCK
+ OPOST = syscall.OPOST
+ CS8 = syscall.CS8
+ IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed uint32
+ Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+ newState.Oflag &^= OPOST
+ newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+ newState.Cflag &^= (CSIZE | PARENB)
+ newState.Cflag |= CS8
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
new file mode 100644
index 00000000000..22921b6aef3
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -0,0 +1,47 @@
+// +build !cgo
+
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ getTermios = syscall.TCGETS
+ setTermios = syscall.TCSETS
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed uint32
+ Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+
+ newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON)
+ newState.Oflag &^= syscall.OPOST
+ newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN)
+ newState.Cflag &^= (syscall.CSIZE | syscall.PARENB)
+ newState.Cflag |= syscall.CS8
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go
new file mode 100644
index 00000000000..ed843ad69c9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_openbsd.go
@@ -0,0 +1,69 @@
+package term
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+const (
+ getTermios = syscall.TIOCGETA
+ setTermios = syscall.TIOCSETA
+)
+
+// Termios magic numbers, pass-throughs of the values defined in syscall.
+const (
+ IGNBRK = syscall.IGNBRK
+ PARMRK = syscall.PARMRK
+ INLCR = syscall.INLCR
+ IGNCR = syscall.IGNCR
+ ECHONL = syscall.ECHONL
+ CSIZE = syscall.CSIZE
+ ICRNL = syscall.ICRNL
+ ISTRIP = syscall.ISTRIP
+ PARENB = syscall.PARENB
+ ECHO = syscall.ECHO
+ ICANON = syscall.ICANON
+ ISIG = syscall.ISIG
+ IXON = syscall.IXON
+ BRKINT = syscall.BRKINT
+ INPCK = syscall.INPCK
+ OPOST = syscall.OPOST
+ CS8 = syscall.CS8
+ IEXTEN = syscall.IEXTEN
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]byte
+ Ispeed uint32
+ Ospeed uint32
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON)
+ newState.Oflag &^= OPOST
+ newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN)
+ newState.Cflag &^= (CSIZE | PARENB)
+ newState.Cflag |= CS8
+ newState.Cc[syscall.VMIN] = 1
+ newState.Cc[syscall.VTIME] = 0
+
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
new file mode 100644
index 00000000000..58452ad786f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
@@ -0,0 +1,261 @@
+// +build windows
+
+package windows
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "unsafe"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+const (
+ escapeSequence = ansiterm.KEY_ESC_CSI
+)
+
+// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
+type ansiReader struct {
+ file *os.File
+ fd uintptr
+ buffer []byte
+ cbBuffer int
+ command []byte
+}
+
+// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
+// Windows console input handle.
+func NewAnsiReader(nFile int) io.ReadCloser {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ return &ansiReader{
+ file: file,
+ fd: fd,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ buffer: make([]byte, 0),
+ }
+}
+
+// Close closes the wrapped file.
+func (ar *ansiReader) Close() (err error) {
+ return ar.file.Close()
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (ar *ansiReader) Fd() uintptr {
+ return ar.fd
+}
+
+// Read reads up to len(p) bytes of translated input events into p.
+func (ar *ansiReader) Read(p []byte) (int, error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ // If previously read bytes exist, read as much as we can and return
+ if len(ar.buffer) > 0 {
+ logger.Debugf("Reading previously cached bytes")
+
+ originalLength := len(ar.buffer)
+ copiedLength := copy(p, ar.buffer)
+
+ if copiedLength == originalLength {
+ ar.buffer = make([]byte, 0, len(p))
+ } else {
+ ar.buffer = ar.buffer[copiedLength:]
+ }
+
+ logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+ return copiedLength, nil
+ }
+
+ // Read and translate key events
+ events, err := readInputEvents(ar.fd, len(p))
+ if err != nil {
+ return 0, err
+ } else if len(events) == 0 {
+ logger.Debug("No input events detected")
+ return 0, nil
+ }
+
+ keyBytes := translateKeyEvents(events, []byte(escapeSequence))
+
+ // Save excess bytes and right-size keyBytes
+ if len(keyBytes) > len(p) {
+ logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+ ar.buffer = keyBytes[len(p):]
+ keyBytes = keyBytes[:len(p)]
+ } else if len(keyBytes) == 0 {
+ logger.Debug("No key bytes returned from the translator")
+ return 0, nil
+ }
+
+ copiedLength := copy(p, keyBytes)
+ if copiedLength != len(keyBytes) {
+ return 0, errors.New("unexpected copy length encountered")
+ }
+
+ logger.Debugf("Read p[%d]: % x", copiedLength, p)
+ logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+ return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
+ // Determine the maximum number of records to retrieve
+ // -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+ // unsafe.Sizeof requires an expression vs. a type-reference; the casting
+ // tricks the type system into believing it has such an expression.
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+ countRecords := maxBytes / recordSize
+ if countRecords > ansiterm.MAX_INPUT_EVENTS {
+ countRecords = ansiterm.MAX_INPUT_EVENTS
+ }
+ logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+ // Wait for and read input events
+ events := make([]winterm.INPUT_RECORD, countRecords)
+ nEvents := uint32(0)
+ eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+ if err != nil {
+ return nil, err
+ }
+
+ if eventsExist {
+ err = winterm.ReadConsoleInput(fd, events, &nEvents)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Return a slice restricted to the number of returned records
+ logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+ return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "%s%sA",
+ winterm.VK_DOWN: "%s%sB",
+ winterm.VK_RIGHT: "%s%sC",
+ winterm.VK_LEFT: "%s%sD",
+}
+
+var keyMapPrefix = map[uint16]string{
+ winterm.VK_UP: "\x1B[%sA",
+ winterm.VK_DOWN: "\x1B[%sB",
+ winterm.VK_RIGHT: "\x1B[%sC",
+ winterm.VK_LEFT: "\x1B[%sD",
+ winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1
+ winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4
+ winterm.VK_INSERT: "\x1B[2%s~",
+ winterm.VK_DELETE: "\x1B[3%s~",
+ winterm.VK_PRIOR: "\x1B[5%s~",
+ winterm.VK_NEXT: "\x1B[6%s~",
+ winterm.VK_F1: "",
+ winterm.VK_F2: "",
+ winterm.VK_F3: "\x1B[13%s~",
+ winterm.VK_F4: "\x1B[14%s~",
+ winterm.VK_F5: "\x1B[15%s~",
+ winterm.VK_F6: "\x1B[17%s~",
+ winterm.VK_F7: "\x1B[18%s~",
+ winterm.VK_F8: "\x1B[19%s~",
+ winterm.VK_F9: "\x1B[20%s~",
+ winterm.VK_F10: "\x1B[21%s~",
+ winterm.VK_F11: "\x1B[23%s~",
+ winterm.VK_F12: "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+ var buffer bytes.Buffer
+ for _, event := range events {
+ if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+ buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+ }
+ }
+
+ return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+ if keyEvent.UnicodeChar == 0 {
+ return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+ }
+
+ _, alt, control := getControlKeys(keyEvent.ControlKeyState)
+ if control {
+ // TODO(azlinux): Implement following control sequences
+ // <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+ // <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+ // <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-S.
+ // <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+ // <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+ // <Ctrl>-E  Quits current command and creates a core
+
+ }
+
+ // <Alt>+Key generates ESC N Key
+ if !control && alt {
+ return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+ }
+
+ return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+ shift, alt, control := getControlKeys(controlState)
+ modifier := getControlKeysModifier(shift, alt, control)
+
+ if format, ok := arrowKeyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, escapeSequence, modifier)
+ }
+
+ if format, ok := keyMapPrefix[key]; ok {
+ return fmt.Sprintf(format, modifier)
+ }
+
+ return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+ shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+ alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+ control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+ return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+ if shift && alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_8
+ }
+ if alt && control {
+ return ansiterm.KEY_CONTROL_PARAM_7
+ }
+ if shift && control {
+ return ansiterm.KEY_CONTROL_PARAM_6
+ }
+ if control {
+ return ansiterm.KEY_CONTROL_PARAM_5
+ }
+ if shift && alt {
+ return ansiterm.KEY_CONTROL_PARAM_4
+ }
+ if alt {
+ return ansiterm.KEY_CONTROL_PARAM_3
+ }
+ if shift {
+ return ansiterm.KEY_CONTROL_PARAM_2
+ }
+ return ""
+}
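A sketch (Windows-only, and hypothetical as a call site) of how the reader is consumed: once the console handle is wrapped, arrow and function keys surface as the ANSI sequences from the key maps above.

// +build windows

package main

import (
	"bufio"
	"fmt"
	"syscall"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	stdin := windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
	defer stdin.Close()

	// Pressing the up arrow yields ESC [ A (0x1b 0x5b 0x41).
	r := bufio.NewReader(stdin)
	for i := 0; i < 3; i++ {
		b, err := r.ReadByte()
		if err != nil {
			panic(err)
		}
		fmt.Printf("%#x ", b)
	}
}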
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 00000000000..a3ce5697d95
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package windows
+
+import (
+ "io"
+ "os"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
+type ansiWriter struct {
+ file *os.File
+ fd uintptr
+ infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO
+ command []byte
+ escapeSequence []byte
+ inAnsiSequence bool
+ parser *ansiterm.AnsiParser
+}
+
+// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
+// Windows console output handle.
+func NewAnsiWriter(nFile int) io.Writer {
+ initLogger()
+ file, fd := winterm.GetStdFile(nFile)
+ info, err := winterm.GetConsoleScreenBufferInfo(fd)
+ if err != nil {
+ return nil
+ }
+
+ parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
+ logger.Infof("newAnsiWriter: parser %p", parser)
+
+ aw := &ansiWriter{
+ file: file,
+ fd: fd,
+ infoReset: info,
+ command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+ escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
+ parser: parser,
+ }
+
+ logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
+ logger.Infof("newAnsiWriter: %v", aw)
+ return aw
+}
+
+func (aw *ansiWriter) Fd() uintptr {
+ return aw.fd
+}
+
+// Write writes len(p) bytes from p to the underlying data stream.
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+
+ logger.Infof("Write: % x", p)
+ logger.Infof("Write: %s", string(p))
+ return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
new file mode 100644
index 00000000000..ca5c3b2e535
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windows
+
+import (
+ "os"
+
+ "github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+ switch t := in.(type) {
+ case *ansiReader:
+ return t.Fd(), true
+ case *ansiWriter:
+ return t.Fd(), true
+ }
+
+ var inFd uintptr
+ var isTerminal bool
+
+ if file, ok := in.(*os.File); ok {
+ inFd = file.Fd()
+ isTerminal = IsConsole(inFd)
+ }
+ return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+ _, e := winterm.GetConsoleMode(fd)
+ return e == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
new file mode 100644
index 00000000000..ce4cb5990ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windows
+
+import (
+ "io/ioutil"
+ "os"
+ "sync"
+
+ ansiterm "github.com/Azure/go-ansiterm"
+ "github.com/Sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+ initOnce.Do(func() {
+ logFile := ioutil.Discard
+
+ if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+ logFile, _ = os.Create("ansiReaderWriter.log")
+ }
+
+ logger = &logrus.Logger{
+ Out: logFile,
+ Formatter: new(logrus.TextFormatter),
+ Level: logrus.DebugLevel,
+ }
+ })
+}
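The reader and writer only log when the go-ansiterm log environment variable is set before the first stream is created; a small sketch (the variable name comes from ansiterm.LogEnv above, everything else is illustrative):

// +build windows

package main

import (
	"fmt"
	"os"
	"syscall"

	ansiterm "github.com/Azure/go-ansiterm"
	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func main() {
	// Must be set before the first reader/writer is created: initLogger
	// runs once and only opens ansiReaderWriter.log when this is "1".
	os.Setenv(ansiterm.LogEnv, "1")

	stdout := windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
	fmt.Fprint(stdout, "\x1b[32mgreen\x1b[0m\n")
}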
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go
new file mode 100644
index 00000000000..52aeab54ec9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows_test.go
@@ -0,0 +1,3 @@
+// This file is necessary to pass the Docker tests.
+
+package windows
diff --git a/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go
new file mode 100644
index 00000000000..5b0dcce67af
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/testutil/assert/assert.go
@@ -0,0 +1,70 @@
+// Package assert contains functions for making assertions in unit tests
+package assert
+
+import (
+ "fmt"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+// TestingT is an interface that defines the methods of testing.T
+// required by this package.
+type TestingT interface {
+ Fatalf(string, ...interface{})
+}
+
+// Equal compares the actual value to the expected value and fails the test if
+// they are not equal.
+func Equal(t TestingT, actual, expected interface{}) {
+ if expected != actual {
+ fatal(t, fmt.Sprintf("Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual))
+ }
+}
+
+// EqualStringSlice compares two slices and fails the test if they do not contain
+// the same items.
+func EqualStringSlice(t TestingT, actual, expected []string) {
+ if len(actual) != len(expected) {
+ t.Fatalf("Expected (length %d): %q\nActual (length %d): %q",
+ len(expected), expected, len(actual), actual)
+ }
+ for i, item := range actual {
+ if item != expected[i] {
+ t.Fatalf("Slices differ at element %d, expected %q got %q",
+ i, expected[i], item)
+ }
+ }
+}
+
+// NilError asserts that the error is nil, otherwise it fails the test.
+func NilError(t TestingT, err error) {
+ if err != nil {
+ fatal(t, fmt.Sprintf("Expected no error, got: %s", err.Error()))
+ }
+}
+
+// Error asserts that error is not nil, and contains the expected text,
+// otherwise it fails the test.
+func Error(t TestingT, err error, contains string) {
+ if err == nil {
+ fatal(t, "Expected an error, but error was nil")
+ }
+
+ if !strings.Contains(err.Error(), contains) {
+ fatal(t, fmt.Sprintf("Expected error to contain '%s', got '%s'", contains, err.Error()))
+ }
+}
+
+// Contains asserts that the string contains a substring, otherwise it fails the
+// test.
+func Contains(t TestingT, actual, contains string) {
+ if !strings.Contains(actual, contains) {
+ fatal(t, fmt.Sprintf("Expected '%s' to contain '%s'", actual, contains))
+ }
+}
+
+func fatal(t TestingT, msg string) {
+ _, file, line, _ := runtime.Caller(2)
+ t.Fatalf("%s:%d: %s", filepath.Base(file), line, msg)
+}
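A sketch of these helpers inside a test; parseHost is a hypothetical function under test, used only to show how failures point at the caller via runtime.Caller(2).

package mypkg

import (
	"testing"

	"github.com/docker/docker/pkg/testutil/assert"
)

func TestParseHost(t *testing.T) {
	host, err := parseHost("tcp://1.2.3.4:2376") // hypothetical
	assert.NilError(t, err)
	assert.Equal(t, host, "1.2.3.4:2376")
	assert.Contains(t, host, "2376")

	_, err = parseHost("bogus://") // hypothetical
	assert.Error(t, err, "bogus")
}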
diff --git a/vendor/github.com/docker/docker/pkg/testutil/pkg.go b/vendor/github.com/docker/docker/pkg/testutil/pkg.go
new file mode 100644
index 00000000000..110b2e6a797
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/testutil/pkg.go
@@ -0,0 +1 @@
+package testutil
diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go
new file mode 100644
index 00000000000..e3dfad1f0e7
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go
@@ -0,0 +1,133 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+ CAFile string
+
+ // If either CertFile or KeyFile is empty, Client() will not load them,
+ // preventing the client from authenticating to the server.
+ // However, Server() requires them and will error out if they are empty.
+ CertFile string
+ KeyFile string
+
+ // client-only option
+ InsecureSkipVerify bool
+ // server-only option
+ ClientAuth tls.ClientAuthType
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set)
+var clientCipherSuites = []uint16{
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// ServerDefault is a secure-enough default TLS configuration for servers.
+var ServerDefault = tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: DefaultServerAcceptedCiphers,
+}
+
+// ClientDefault is a secure-enough default TLS configuration for clients.
+var ClientDefault = tls.Config{
+ // Prefer TLS1.2 as the client minimum
+ MinVersion: tls.VersionTLS12,
+ CipherSuites: clientCipherSuites,
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string) (*x509.CertPool, error) {
+ // If we should verify the server, we need to load a trusted ca
+ certPool := x509.NewCertPool()
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err)
+ }
+ if !certPool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+ }
+ s := certPool.Subjects()
+ subjects := make([]string, len(s))
+ for i, subject := range s {
+ subjects[i] = string(subject)
+ }
+ logrus.Debugf("Trusting certs with subjects: %v", subjects)
+ return certPool, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+ tlsConfig := ClientDefault
+ tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+ if !options.InsecureSkipVerify {
+ CAs, err := certPool(options.CAFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = CAs
+ }
+
+ if options.CertFile != "" && options.KeyFile != "" {
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ }
+
+ return &tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+ tlsConfig := ServerDefault
+ tlsConfig.ClientAuth = options.ClientAuth
+ tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+ }
+ return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err)
+ }
+ tlsConfig.Certificates = []tls.Certificate{tlsCert}
+ if options.ClientAuth >= tls.VerifyClientCertIfGiven {
+ CAs, err := certPool(options.CAFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.ClientCAs = CAs
+ }
+ return &tlsConfig, nil
+}
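A hedged sketch of building a client configuration; the file paths are illustrative. Client() only loads the CA pool when verification is on, and only loads the key pair when both CertFile and KeyFile are set.

package main

import (
	"net/http"

	"github.com/docker/docker/pkg/tlsconfig"
)

func main() {
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/etc/docker/ca.pem", // illustrative paths
		CertFile: "/etc/docker/cert.pem",
		KeyFile:  "/etc/docker/key.pem",
	})
	if err != nil {
		panic(err)
	}

	// The resulting *tls.Config plugs into a standard transport.
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: cfg},
	}
	_ = client
}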
diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
new file mode 100644
index 00000000000..02610b8b7e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
@@ -0,0 +1,137 @@
+// Package truncindex provides a general 'index tree', used by Docker
+// to reference containers by a few unambiguous leading characters of
+// their ID.
+package truncindex
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/tchap/go-patricia/patricia"
+)
+
+var (
+ // ErrEmptyPrefix is an error returned if the prefix was empty.
+ ErrEmptyPrefix = errors.New("Prefix can't be empty")
+
+ // ErrIllegalChar is returned when a space is in the ID
+ ErrIllegalChar = errors.New("illegal character: ' '")
+
+ // ErrNotExist is returned when ID or its prefix not found in index.
+ ErrNotExist = errors.New("ID does not exist")
+)
+
+// ErrAmbiguousPrefix is returned if the prefix was ambiguous
+// (multiple ids for the prefix).
+type ErrAmbiguousPrefix struct {
+ prefix string
+}
+
+func (e ErrAmbiguousPrefix) Error() string {
+ return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
+}
+
+// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
+// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
+type TruncIndex struct {
+ sync.RWMutex
+ trie *patricia.Trie
+ ids map[string]struct{}
+}
+
+// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
+func NewTruncIndex(ids []string) (idx *TruncIndex) {
+ idx = &TruncIndex{
+ ids: make(map[string]struct{}),
+
+ // Change the patricia max prefix-per-node length,
+ // because our IDs are always 64 characters long.
+ trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
+ }
+ for _, id := range ids {
+ idx.addID(id)
+ }
+ return
+}
+
+func (idx *TruncIndex) addID(id string) error {
+ if strings.Contains(id, " ") {
+ return ErrIllegalChar
+ }
+ if id == "" {
+ return ErrEmptyPrefix
+ }
+ if _, exists := idx.ids[id]; exists {
+ return fmt.Errorf("id already exists: '%s'", id)
+ }
+ idx.ids[id] = struct{}{}
+ if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
+ return fmt.Errorf("failed to insert id: %s", id)
+ }
+ return nil
+}
+
+// Add adds a new ID to the TruncIndex.
+func (idx *TruncIndex) Add(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ return idx.addID(id)
+}
+
+// Delete removes an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Delete(id string) error {
+ idx.Lock()
+ defer idx.Unlock()
+ if _, exists := idx.ids[id]; !exists || id == "" {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ delete(idx.ids, id)
+ if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
+ return fmt.Errorf("no such id: '%s'", id)
+ }
+ return nil
+}
+
+// Get retrieves an ID from the TruncIndex. If there are multiple IDs
+// with the given prefix, an error is returned.
+func (idx *TruncIndex) Get(s string) (string, error) {
+ if s == "" {
+ return "", ErrEmptyPrefix
+ }
+ var id string
+ subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
+ if id != "" {
+ // the prefix is ambiguous if two or more IDs share it
+ id = ""
+ return ErrAmbiguousPrefix{prefix: string(prefix)}
+ }
+ id = string(prefix)
+ return nil
+ }
+
+ idx.RLock()
+ defer idx.RUnlock()
+ if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
+ return "", err
+ }
+ if id != "" {
+ return id, nil
+ }
+ return "", ErrNotExist
+}
+
+// Iterate iterates over all stored IDs, and passes each of them to the given handler.
+func (idx *TruncIndex) Iterate(handler func(id string)) {
+ idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
+ handler(string(prefix))
+ return nil
+ })
+}
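A short sketch of the index in action, with made-up IDs: unique prefixes resolve to the full ID, while a shared prefix surfaces ErrAmbiguousPrefix through Get.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"d78a0baf166ef9699b36c2c326ccc11e726eee6e", // made-up IDs
		"d7222984a9b4c2c326cceee6ee78a0baf166ef96",
	})

	// "d78" is unique, so Get resolves it to the full ID.
	id, err := idx.Get("d78")
	fmt.Println(id, err)

	// "d7" matches both IDs, so Get returns an ErrAmbiguousPrefix.
	if _, err := idx.Get("d7"); err != nil {
		fmt.Println(err)
	}
}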
diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go
new file mode 100644
index 00000000000..8197baf7d47
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/truncindex/truncindex_test.go
@@ -0,0 +1,429 @@
+package truncindex
+
+import (
+ "math/rand"
+ "testing"
+
+ "github.com/docker/docker/pkg/stringid"
+)
+
+// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
+func TestTruncIndex(t *testing.T) {
+ ids := []string{}
+ index := NewTruncIndex(ids)
+ // Get on an empty index
+ if _, err := index.Get("foobar"); err == nil {
+ t.Fatal("Get on an empty index should return an error")
+ }
+
+ // Spaces should be illegal in an id
+ if err := index.Add("I have a space"); err == nil {
+ t.Fatalf("Adding an id with ' ' should return an error")
+ }
+
+ id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96"
+ // Add an id
+ if err := index.Add(id); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add an empty id (should fail)
+ if err := index.Add(""); err == nil {
+ t.Fatalf("Adding an empty id should return an error")
+ }
+
+ // Get a non-existing id
+ assertIndexGet(t, index, "abracadabra", "", true)
+ // Get an empty id
+ assertIndexGet(t, index, "", "", true)
+ // Get the exact id
+ assertIndexGet(t, index, id, id, false)
+ // The first letter should match
+ assertIndexGet(t, index, id[:1], id, false)
+ // The first half should match
+ assertIndexGet(t, index, id[:len(id)/2], id, false)
+ // The second half should NOT match
+ assertIndexGet(t, index, id[len(id)/2:], "", true)
+
+ id2 := id[:6] + "blabla"
+ // Add an id
+ if err := index.Add(id2); err != nil {
+ t.Fatal(err)
+ }
+ // Both exact IDs should work
+ assertIndexGet(t, index, id, id, false)
+ assertIndexGet(t, index, id2, id2, false)
+
+ // 6 characters or less should conflict
+ assertIndexGet(t, index, id[:6], "", true)
+ assertIndexGet(t, index, id[:4], "", true)
+ assertIndexGet(t, index, id[:1], "", true)
+
+ // An ambiguous id prefix should return an error
+ if _, err := index.Get(id[:4]); err == nil {
+ t.Fatal("An ambiguous id prefix should return an error")
+ }
+
+ // 7 characters should NOT conflict
+ assertIndexGet(t, index, id[:7], id, false)
+ assertIndexGet(t, index, id2[:7], id2, false)
+
+ // Deleting a non-existing id should return an error
+ if err := index.Delete("non-existing"); err == nil {
+ t.Fatalf("Deleting a non-existing id should return an error")
+ }
+
+ // Deleting an empty id should return an error
+ if err := index.Delete(""); err == nil {
+ t.Fatal("Deleting an empty id should return an error")
+ }
+
+ // Deleting id2 should remove conflicts
+ if err := index.Delete(id2); err != nil {
+ t.Fatal(err)
+ }
+ // id2 should no longer work
+ assertIndexGet(t, index, id2, "", true)
+ assertIndexGet(t, index, id2[:7], "", true)
+ assertIndexGet(t, index, id2[:11], "", true)
+
+ // conflicts between id and id2 should be gone
+ assertIndexGet(t, index, id[:6], id, false)
+ assertIndexGet(t, index, id[:4], id, false)
+ assertIndexGet(t, index, id[:1], id, false)
+
+ // non-conflicting substrings should still not conflict
+ assertIndexGet(t, index, id[:7], id, false)
+ assertIndexGet(t, index, id[:15], id, false)
+ assertIndexGet(t, index, id, id, false)
+
+ assertIndexIterate(t)
+}
+
+func assertIndexIterate(t *testing.T) {
+ ids := []string{
+ "19b36c2c326ccc11e726eee6ee78a0baf166ef96",
+ "28b36c2c326ccc11e726eee6ee78a0baf166ef96",
+ "37b36c2c326ccc11e726eee6ee78a0baf166ef96",
+ "46b36c2c326ccc11e726eee6ee78a0baf166ef96",
+ }
+
+ index := NewTruncIndex(ids)
+
+ index.Iterate(func(targetId string) {
+ for _, id := range ids {
+ if targetId == id {
+ return
+ }
+ }
+
+ t.Fatalf("An unknown ID '%s'", targetId)
+ })
+}
+
+func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) {
+ if result, err := index.Get(input); err != nil && !expectError {
+ t.Fatalf("Unexpected error getting '%s': %s", input, err)
+ } else if err == nil && expectError {
+ t.Fatalf("Getting '%s' should return an error, not '%s'", input, result)
+ } else if result != expectedResult {
+ t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult)
+ }
+}
+
+func BenchmarkTruncIndexAdd100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAdd250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAdd500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet100(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet250(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexGet500(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexDelete500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StartTimer()
+ for _, id := range testSet {
+ if err := index.Delete(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexNew100(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 100; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexNew250(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 250; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexNew500(b *testing.B) {
+ var testSet []string
+ for i := 0; i < 500; i++ {
+ testSet = append(testSet, stringid.GenerateNonCryptoID())
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ NewTruncIndex(testSet)
+ }
+}
+
+func BenchmarkTruncIndexAddGet100(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := stringid.GenerateNonCryptoID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAddGet250(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := stringid.GenerateNonCryptoID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
+
+func BenchmarkTruncIndexAddGet500(b *testing.B) {
+ var testSet []string
+ var testKeys []string
+ for i := 0; i < 500; i++ {
+ id := stringid.GenerateNonCryptoID()
+ testSet = append(testSet, id)
+ l := rand.Intn(12) + 12
+ testKeys = append(testKeys, id[:l])
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ index := NewTruncIndex([]string{})
+ for _, id := range testSet {
+ if err := index.Add(id); err != nil {
+ b.Fatal(err)
+ }
+ }
+ for _, id := range testKeys {
+ if res, err := index.Get(id); err != nil {
+ b.Fatal(res, err)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go
new file mode 100644
index 00000000000..44152873b1f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go
@@ -0,0 +1,50 @@
+// Package urlutil provides helper functions to check the kind of URLs.
+// It supports HTTP URLs, git URLs and transport URLs (tcp://, …)
+package urlutil
+
+import (
+ "regexp"
+ "strings"
+)
+
+var (
+ validPrefixes = map[string][]string{
+ "url": {"http://", "https://"},
+ "git": {"git://", "github.com/", "git@"},
+ "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"},
+ }
+ urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$")
+)
+
+// IsURL returns true if the provided str is an HTTP(S) URL.
+func IsURL(str string) bool {
+ return checkURL(str, "url")
+}
+
+// IsGitURL returns true if the provided str is a git repository URL.
+func IsGitURL(str string) bool {
+ if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
+ return true
+ }
+ return checkURL(str, "git")
+}
+
+// IsGitTransport returns true if the provided str is a git transport by inspecting
+// the prefix of the string for known protocols used in git.
+func IsGitTransport(str string) bool {
+ return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
+}
+
+// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL.
+func IsTransportURL(str string) bool {
+ return checkURL(str, "transport")
+}
+
+func checkURL(str, kind string) bool {
+ for _, prefix := range validPrefixes[kind] {
+ if strings.HasPrefix(str, prefix) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go
new file mode 100644
index 00000000000..75eb464fe51
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil_test.go
@@ -0,0 +1,70 @@
+package urlutil
+
+import "testing"
+
+var (
+ gitUrls = []string{
+ "git://github.com/docker/docker",
+ "git@github.com:docker/docker.git",
+ "git@bitbucket.org:atlassianlabs/atlassian-docker.git",
+ "https://github.com/docker/docker.git",
+ "http://github.com/docker/docker.git",
+ "http://github.com/docker/docker.git#branch",
+ "http://github.com/docker/docker.git#:dir",
+ }
+ incompleteGitUrls = []string{
+ "github.com/docker/docker",
+ }
+ invalidGitUrls = []string{
+ "http://github.com/docker/docker.git:#branch",
+ }
+ transportUrls = []string{
+ "tcp://example.com",
+ "tcp+tls://example.com",
+ "udp://example.com",
+ "unix:///example",
+ "unixgram:///example",
+ }
+)
+
+func TestValidGitTransport(t *testing.T) {
+ for _, url := range gitUrls {
+ if IsGitTransport(url) == false {
+ t.Fatalf("%q should be detected as valid Git prefix", url)
+ }
+ }
+
+ for _, url := range incompleteGitUrls {
+ if IsGitTransport(url) == true {
+ t.Fatalf("%q should not be detected as valid Git prefix", url)
+ }
+ }
+}
+
+func TestIsGIT(t *testing.T) {
+ for _, url := range gitUrls {
+ if IsGitURL(url) == false {
+ t.Fatalf("%q should be detected as valid Git url", url)
+ }
+ }
+
+ for _, url := range incompleteGitUrls {
+ if IsGitURL(url) == false {
+ t.Fatalf("%q should be detected as valid Git url", url)
+ }
+ }
+
+ for _, url := range invalidGitUrls {
+ if IsGitURL(url) == true {
+ t.Fatalf("%q should not be detected as valid Git prefix", url)
+ }
+ }
+}
+
+func TestIsTransport(t *testing.T) {
+ for _, url := range transportUrls {
+ if IsTransportURL(url) == false {
+ t.Fatalf("%q should be detected as valid Transport url", url)
+ }
+ }
+}
diff --git a/vendor/github.com/docker/docker/pkg/useragent/README.md b/vendor/github.com/docker/docker/pkg/useragent/README.md
new file mode 100644
index 00000000000..d9cb367d109
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/useragent/README.md
@@ -0,0 +1 @@
+This package provides helper functions to pack version information into a single User-Agent header.
diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go
new file mode 100644
index 00000000000..1137db51b89
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go
@@ -0,0 +1,55 @@
+// Package useragent provides helper functions to pack
+// version information into a single User-Agent header.
+package useragent
+
+import (
+ "strings"
+)
+
+// VersionInfo is used to model UserAgent versions.
+type VersionInfo struct {
+ Name string
+ Version string
+}
+
+func (vi *VersionInfo) isValid() bool {
+ const stopChars = " \t\r\n/"
+ name := vi.Name
+ vers := vi.Version
+ if len(name) == 0 || strings.ContainsAny(name, stopChars) {
+ return false
+ }
+ if len(vers) == 0 || strings.ContainsAny(vers, stopChars) {
+ return false
+ }
+ return true
+}
+
+// AppendVersions converts versions to a string and appends the string to the string base.
+//
+// Each VersionInfo will be converted to a string in the format of
+// "product/version", where the "product" is get from the name field, while
+// version is get from the version field. Several pieces of version information
+// will be concatenated and separated by space.
+//
+// Example:
+// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"})
+// results in "base foo/1.0 bar/2.0".
+func AppendVersions(base string, versions ...VersionInfo) string {
+ if len(versions) == 0 {
+ return base
+ }
+
+ verstrs := make([]string, 0, 1+len(versions))
+ if len(base) > 0 {
+ verstrs = append(verstrs, base)
+ }
+
+ for _, v := range versions {
+ if !v.isValid() {
+ continue
+ }
+ verstrs = append(verstrs, v.Name+"/"+v.Version)
+ }
+ return strings.Join(verstrs, " ")
+}
diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go
new file mode 100644
index 00000000000..0ad7243a6d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/useragent/useragent_test.go
@@ -0,0 +1,31 @@
+package useragent
+
+import "testing"
+
+func TestVersionInfo(t *testing.T) {
+ vi := VersionInfo{"foo", "bar"}
+ if !vi.isValid() {
+ t.Fatalf("VersionInfo should be valid")
+ }
+ vi = VersionInfo{"", "bar"}
+ if vi.isValid() {
+ t.Fatalf("Expected VersionInfo to be invalid")
+ }
+ vi = VersionInfo{"foo", ""}
+ if vi.isValid() {
+ t.Fatalf("Expected VersionInfo to be invalid")
+ }
+}
+
+func TestAppendVersions(t *testing.T) {
+ vis := []VersionInfo{
+ {"foo", "1.0"},
+ {"bar", "0.1"},
+ {"pi", "3.1.4"},
+ }
+ v := AppendVersions("base", vis...)
+ expect := "base foo/1.0 bar/0.1 pi/3.1.4"
+ if v != expect {
+ t.Fatalf("expected %q, got %q", expect, v)
+ }
+}
diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go
new file mode 100644
index 00000000000..988cc142565
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume.go
@@ -0,0 +1,190 @@
+package volume
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "syscall"
+
+ "github.com/docker/docker/pkg/stringid"
+ "github.com/docker/docker/pkg/system"
+ "github.com/opencontainers/runc/libcontainer/label"
+)
+
+// DefaultDriverName is the driver name used for the driver
+// implemented in the local package.
+const DefaultDriverName = "local"
+
+// Scopes define whether a volume is cluster-wide (global) or local-only.
+// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume
+const (
+ LocalScope = "local"
+ GlobalScope = "global"
+)
+
+// Driver is for creating and removing volumes.
+type Driver interface {
+ // Name returns the name of the volume driver.
+ Name() string
+ // Create makes a new volume with the given id.
+ Create(name string, opts map[string]string) (Volume, error)
+ // Remove deletes the volume.
+ Remove(vol Volume) (err error)
+ // List lists all the volumes the driver has
+ List() ([]Volume, error)
+ // Get retrieves the volume with the requested name
+ Get(name string) (Volume, error)
+ // Scope returns the scope of the driver (e.g. `global` or `local`).
+ // Scope determines how the driver is handled at a cluster level
+ Scope() string
+}
+
+// Capability defines a set of capabilities that a driver is able to handle.
+type Capability struct {
+ // Scope is the scope of the driver, `global` or `local`
+ // A `global` scope indicates that the driver manages volumes across the cluster
+ // A `local` scope indicates that the driver only manages volumes resources local to the host
+ // Scope is declared by the driver
+ Scope string
+}
+
+// Volume is a place to store data. It is backed by a specific driver, and can be mounted.
+type Volume interface {
+ // Name returns the name of the volume
+ Name() string
+ // DriverName returns the name of the driver which owns this volume.
+ DriverName() string
+ // Path returns the absolute path to the volume.
+ Path() string
+ // Mount mounts the volume and returns the absolute path to
+ // where it can be consumed.
+ Mount(id string) (string, error)
+ // Unmount unmounts the volume when it is no longer in use.
+ Unmount(id string) error
+ // Status returns low-level status information about a volume
+ Status() map[string]interface{}
+}
+
+// LabeledVolume wraps a Volume with user-defined labels
+type LabeledVolume interface {
+ Labels() map[string]string
+ Volume
+}
+
+// ScopedVolume wraps a volume with a cluster scope (e.g., `local` or `global`)
+type ScopedVolume interface {
+ Scope() string
+ Volume
+}
+
+// MountPoint is the intersection point between a volume and a container. It
+// specifies which volume is to be used and where inside a container it should
+// be mounted.
+type MountPoint struct {
+ Source string // Container host directory
+ Destination string // Inside the container
+ RW bool // True if writable
+ Name string // Name set by user
+ Driver string // Volume driver to use
+ Volume Volume `json:"-"`
+
+ // Note Mode is not used on Windows
+ Mode string `json:"Relabel"` // Originally this field was named `Relabel`
+
+ // Note Propagation is not used on Windows
+ Propagation string // Mount propagation string
+ Named bool // specifies if the mountpoint was specified by name
+
+ // Specifies if data should be copied from the container before the first mount
+ // Use a pointer here so we can tell if the user set this value explicitly
+ // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated
+ CopyData bool `json:"-"`
+ // ID is the opaque ID used to pass to the volume driver.
+ // This should be set by calls to `Mount` and unset by calls to `Unmount`
+ ID string
+}
+
+// Setup sets up a mount point by either mounting the volume if it is
+// configured, or creating the source directory if supplied.
+func (m *MountPoint) Setup(mountLabel string) (string, error) {
+ if m.Volume != nil {
+ if m.ID == "" {
+ m.ID = stringid.GenerateNonCryptoID()
+ }
+ return m.Volume.Mount(m.ID)
+ }
+ if len(m.Source) == 0 {
+ return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined")
+ }
+ // system.MkdirAll() produces an error if m.Source exists and is a file (not a directory).
+ if err := system.MkdirAll(m.Source, 0755); err != nil {
+ if perr, ok := err.(*os.PathError); ok {
+ if perr.Err != syscall.ENOTDIR {
+ return "", err
+ }
+ }
+ }
+ if label.RelabelNeeded(m.Mode) {
+ if err := label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode)); err != nil {
+ return "", err
+ }
+ }
+ return m.Source, nil
+}
+
+// Path returns the path of a volume in a mount point.
+func (m *MountPoint) Path() string {
+ if m.Volume != nil {
+ return m.Volume.Path()
+ }
+ return m.Source
+}
+
+// Type returns the type of mount point
+func (m *MountPoint) Type() string {
+ if m.Name != "" {
+ return "volume"
+ }
+ if m.Source != "" {
+ return "bind"
+ }
+ return "ephemeral"
+}
+
+// ParseVolumesFrom parses a volumes-from specification, validates it,
+// and returns the container ID and mount mode.
+func ParseVolumesFrom(spec string) (string, string, error) {
+ if len(spec) == 0 {
+ return "", "", fmt.Errorf("malformed volumes-from specification: %s", spec)
+ }
+
+ specParts := strings.SplitN(spec, ":", 2)
+ id := specParts[0]
+ mode := "rw"
+
+ if len(specParts) == 2 {
+ mode = specParts[1]
+ if !ValidMountMode(mode) {
+ return "", "", errInvalidMode(mode)
+ }
+ // For now don't allow propagation properties while importing
+ // volumes from data container. These volumes will inherit
+ // the same propagation property as of the original volume
+ // in data container. This probably can be relaxed in future.
+ if HasPropagation(mode) {
+ return "", "", errInvalidMode(mode)
+ }
+ // Do not allow copy modes on volumes-from
+ if _, isSet := getCopyMode(mode); isSet {
+ return "", "", errInvalidMode(mode)
+ }
+ }
+ return id, mode, nil
+}
+
+func errInvalidMode(mode string) error {
+ return fmt.Errorf("invalid mode: %v", mode)
+}
+
+func errInvalidSpec(spec string) error {
+ return fmt.Errorf("Invalid volume specification: '%s'", spec)
+}
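A sketch of the forms ParseVolumesFrom accepts: the mode defaults to "rw", and both propagation and copy modifiers are rejected for volumes-from, as the comments above explain.

package main

import (
	"fmt"

	"github.com/docker/docker/volume"
)

func main() {
	id, mode, err := volume.ParseVolumesFrom("sourcecontainer:ro")
	fmt.Println(id, mode, err) // "sourcecontainer" "ro" <nil>

	// Propagation flags are not allowed when importing volumes.
	if _, _, err := volume.ParseVolumesFrom("sourcecontainer:rw,rshared"); err != nil {
		fmt.Println(err) // invalid mode: rw,rshared
	}
}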
diff --git a/vendor/github.com/docker/docker/volume/volume_copy.go b/vendor/github.com/docker/docker/volume/volume_copy.go
new file mode 100644
index 00000000000..067537fb7dc
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_copy.go
@@ -0,0 +1,28 @@
+package volume
+
+import "strings"
+
+const (
+ // DefaultCopyMode is the copy mode used by default for normal/named volumes
+ DefaultCopyMode = true
+)
+
+// copyModes maps a copy-mode option to whether copying is enabled, i.e. {<mode>=isEnabled}.
+var copyModes = map[string]bool{
+ "nocopy": false,
+}
+
+func copyModeExists(mode string) bool {
+ _, exists := copyModes[mode]
+ return exists
+}
+
+// getCopyMode gets the copy mode from the mode string for mounts. It returns
+// the copy mode and whether a copy mode was explicitly present.
+func getCopyMode(mode string) (bool, bool) {
+ for _, o := range strings.Split(mode, ",") {
+ if isEnabled, exists := copyModes[o]; exists {
+ return isEnabled, true
+ }
+ }
+ return DefaultCopyMode, false
+}
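+
+// A sketch of expected behavior:
+//
+//    getCopyMode("ro,nocopy") // (false, true): copying explicitly disabled
+//    getCopyMode("rw")        // (DefaultCopyMode, false): nothing specified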
diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_linux.go b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go
new file mode 100644
index 00000000000..f5f28205a01
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_propagation_linux.go
@@ -0,0 +1,44 @@
+// +build linux
+
+package volume
+
+import (
+ "strings"
+)
+
+// DefaultPropagationMode defines what propagation mode should be used by
+// default if the user has not specified one explicitly.
+const DefaultPropagationMode string = "rprivate"
+
+// propagation modes
+var propagationModes = map[string]bool{
+ "private": true,
+ "rprivate": true,
+ "slave": true,
+ "rslave": true,
+ "shared": true,
+ "rshared": true,
+}
+
+// GetPropagation extracts and returns the mount propagation mode. If there
+// are no specifications, it defaults to "rprivate".
+func GetPropagation(mode string) string {
+ for _, o := range strings.Split(mode, ",") {
+ if propagationModes[o] {
+ return o
+ }
+ }
+ return DefaultPropagationMode
+}
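+
+// A sketch of expected behavior on Linux:
+//
+//    GetPropagation("ro,rshared") // "rshared"
+//    GetPropagation("rw")         // "rprivate" (the default)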
+
+// HasPropagation checks if there is a valid propagation mode present in the
+// passed string. Returns true if a valid propagation mode specifier is
+// present, false otherwise.
+func HasPropagation(mode string) bool {
+ for _, o := range strings.Split(mode, ",") {
+ if propagationModes[o] {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go
new file mode 100644
index 00000000000..0edc89abe33
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_propagation_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux
+
+package volume
+
+// DefaultPropagationMode is only meaningful on Linux; on other platforms it
+// is the empty string.
+const DefaultPropagationMode string = ""
+
+// propagation modes not supported on this platform.
+var propagationModes = map[string]bool{}
+
+// GetPropagation is not supported on this platform and returns the empty string.
+func GetPropagation(mode string) string {
+ return DefaultPropagationMode
+}
+
+// HasPropagation checks if there is a valid propagation mode present in the
+// passed string. On this platform it always returns false.
+func HasPropagation(mode string) bool {
+ return false
+}
diff --git a/vendor/github.com/docker/docker/volume/volume_unix.go b/vendor/github.com/docker/docker/volume/volume_unix.go
new file mode 100644
index 00000000000..2520d7c1442
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_unix.go
@@ -0,0 +1,186 @@
+// +build linux freebsd darwin solaris
+
+package volume
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// read-write modes
+var rwModes = map[string]bool{
+ "rw": true,
+ "ro": true,
+}
+
+// label modes
+var labelModes = map[string]bool{
+ "Z": true,
+ "z": true,
+}
+
+// BackwardsCompatible decides whether this mount point can be
+// used in old versions of Docker or not.
+// Only bind mounts and local volumes can be used in old versions of Docker.
+func (m *MountPoint) BackwardsCompatible() bool {
+ return len(m.Source) > 0 || m.Driver == DefaultDriverName
+}
+
+// HasResource checks whether the given absolute path for a container is in
+// this mount point. If the relative path starts with `../` then the resource
+// is outside of this mount point, but we can't simply check for this prefix
+// because it misses `..` which is also outside of the mount, so check both.
+func (m *MountPoint) HasResource(absolutePath string) bool {
+ relPath, err := filepath.Rel(m.Destination, absolutePath)
+ return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator))
+}
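+
+// A behavior sketch for a mount with Destination "/foo" (hypothetical paths):
+//
+//    m.HasResource("/foo/bar") // true: inside the mount
+//    m.HasResource("/foobar")  // false: shares only a name prefix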
+
+// ParseMountSpec validates that the configuration of mount information is
+// valid and returns the parsed MountPoint.
+func ParseMountSpec(spec, volumeDriver string) (*MountPoint, error) {
+ spec = filepath.ToSlash(spec)
+
+ mp := &MountPoint{
+ RW: true,
+ Propagation: DefaultPropagationMode,
+ }
+ if strings.Count(spec, ":") > 2 {
+ return nil, errInvalidSpec(spec)
+ }
+
+ arr := strings.SplitN(spec, ":", 3)
+ if arr[0] == "" {
+ return nil, errInvalidSpec(spec)
+ }
+
+ switch len(arr) {
+ case 1:
+ // Just a destination path in the container
+ mp.Destination = filepath.Clean(arr[0])
+ case 2:
+ if isValid := ValidMountMode(arr[1]); isValid {
+ // Destination + Mode is not a valid volume - volumes
+ // cannot include a mode, e.g. /foo:rw
+ return nil, errInvalidSpec(spec)
+ }
+ // Host Source Path or Name + Destination
+ mp.Source = arr[0]
+ mp.Destination = arr[1]
+ case 3:
+ // HostSourcePath+DestinationPath+Mode
+ mp.Source = arr[0]
+ mp.Destination = arr[1]
+ mp.Mode = arr[2] // Mode field is used by SELinux to decide whether to apply label
+ if !ValidMountMode(mp.Mode) {
+ return nil, errInvalidMode(mp.Mode)
+ }
+ mp.RW = ReadWrite(mp.Mode)
+ mp.Propagation = GetPropagation(mp.Mode)
+ default:
+ return nil, errInvalidSpec(spec)
+ }
+
+ // validate the volume's destination path
+ mp.Destination = filepath.Clean(mp.Destination)
+ if !filepath.IsAbs(mp.Destination) {
+ return nil, fmt.Errorf("Invalid volume destination path: '%s': mount path must be absolute", mp.Destination)
+ }
+
+ // Destination cannot be "/"
+ if mp.Destination == "/" {
+ return nil, fmt.Errorf("Invalid specification: destination can't be '/' in '%s'", spec)
+ }
+
+ name, source := ParseVolumeSource(mp.Source)
+ if len(source) == 0 {
+ mp.Source = "" // Clear it out as we previously assumed it was not a name
+ mp.Driver = volumeDriver
+ // Named volumes can't have propagation properties specified.
+ // Their defaults will be decided by docker. This is just a
+ // safeguard. We don't want a situation where a named volume is
+ // mounted '[r]shared' inside a container, the container creates
+ // further mounts under that volume, those mounts become visible
+ // on the host, and volume cleanup later becomes a problem because
+ // the container did not unmount the submounts explicitly.
+ if HasPropagation(mp.Mode) {
+ return nil, errInvalidSpec(spec)
+ }
+ } else {
+ mp.Source = filepath.Clean(source)
+ }
+
+ copyData, isSet := getCopyMode(mp.Mode)
+ // do not allow copy modes on binds
+ if len(name) == 0 && isSet {
+ return nil, errInvalidMode(mp.Mode)
+ }
+
+ mp.CopyData = copyData
+ mp.Name = name
+
+ return mp, nil
+}
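+
+// A sketch of a bind-mount spec being parsed (paths are hypothetical):
+//
+//    mp, err := ParseMountSpec("/var/data:/data:ro,Z", "local")
+//    // mp.Source == "/var/data", mp.Destination == "/data",
+//    // mp.RW == false, mp.Propagation == "rprivate", err == nil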
+
+// ParseVolumeSource parses the origin source that is mounted into the
+// container. It returns a name and a source. If the spec passed in is an
+// absolute path, it is treated as a source; otherwise it is treated as a
+// name.
+func ParseVolumeSource(spec string) (string, string) {
+ if !filepath.IsAbs(spec) {
+ return spec, ""
+ }
+ return "", spec
+}
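+
+// A sketch of expected behavior:
+//
+//    ParseVolumeSource("/var/data") // ("", "/var/data"): absolute, so a source
+//    ParseVolumeSource("mydata")    // ("mydata", ""): relative, so a name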
+
+// IsVolumeNameValid checks a volume name in a platform-specific manner. On
+// Unix platforms every name is considered valid.
+func IsVolumeNameValid(name string) (bool, error) {
+ return true, nil
+}
+
+// ValidMountMode checks that the mount mode is valid: at most one option of
+// each kind (read-write, label, propagation, copy) is allowed, and unknown
+// options are rejected.
+func ValidMountMode(mode string) bool {
+ rwModeCount := 0
+ labelModeCount := 0
+ propagationModeCount := 0
+ copyModeCount := 0
+
+ for _, o := range strings.Split(mode, ",") {
+ switch {
+ case rwModes[o]:
+ rwModeCount++
+ case labelModes[o]:
+ labelModeCount++
+ case propagationModes[o]:
+ propagationModeCount++
+ case copyModeExists(o):
+ copyModeCount++
+ default:
+ return false
+ }
+ }
+
+ // Only one string for each mode is allowed.
+ if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 {
+ return false
+ }
+ return true
+}
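+
+// A sketch of expected behavior (on Linux, where propagation modes are populated):
+//
+//    ValidMountMode("rw,Z,rprivate") // true: one option of each kind
+//    ValidMountMode("rw,ro")         // false: two read-write modes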
+
+// ReadWrite reports whether a mode string allows read-write access. If no
+// read-write mode is specified, it defaults to true; an invalid mode string
+// returns false.
+func ReadWrite(mode string) bool {
+ if !ValidMountMode(mode) {
+ return false
+ }
+
+ for _, o := range strings.Split(mode, ",") {
+ if o == "ro" {
+ return false
+ }
+ }
+
+ return true
+}
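+
+// A sketch of expected behavior:
+//
+//    ReadWrite("z")     // true: no read-write mode given, defaults to true
+//    ReadWrite("ro")    // false
+//    ReadWrite("bogus") // false: invalid mode strings are never read-write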
diff --git a/vendor/github.com/docker/docker/volume/volume_windows.go b/vendor/github.com/docker/docker/volume/volume_windows.go
new file mode 100644
index 00000000000..7aa6983d2ce
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/volume_windows.go
@@ -0,0 +1,206 @@
+package volume
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/docker/docker/pkg/system"
+)
+
+// read-write modes
+var rwModes = map[string]bool{
+ "rw": true,
+}
+
+// read-only modes
+var roModes = map[string]bool{
+ "ro": true,
+}
+
+const (
+ // Spec should be in the format [source:]destination[:mode]
+ //
+ // Examples: c:\foo bar:d:rw
+ // c:\foo:d:\bar
+ // myname:d:
+ // d:\
+ //
+ // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See
+ // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to
+ // test is https://regex-golang.appspot.com/assets/html/index.html
+ //
+ // Useful link for referencing named capturing groups:
+ // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex
+ //
+ // There are three match groups: source, destination and mode.
+ //
+
+ // RXHostDir is the first option of a source
+ RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*`
+ // RXName is the second option of a source
+ RXName = `[^\\/:*?"<>|\r\n]+`
+ // RXReservedNames are reserved names not possible on Windows
+ RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
+
+ // RXSource is the combined possibilities for a source
+ RXSource = `((?P