diff --git a/.gitignore b/.gitignore
index 722d5e71d9..255f7bad16 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
.vscode
+opa
diff --git a/.travis.yml b/.travis.yml
index 54bfe39c1f..1887341e4b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,4 +1,4 @@
language: go
-
-install: ./install-deps-gen-code.sh
-
+go:
+ - 1.5
+ - 1.6
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..5b9f103eaf
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,6 @@
+# Change Log
+
+All notable changes to this project will be documented in this file. This
+project adheres to [Semantic Versioning](http://semver.org/).
+
+## Unreleased
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..7c9f6390d5
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,35 @@
+# Copyright 2015 The OPA Authors. All rights reserved.
+# Use of this source code is governed by an Apache2
+# license that can be found in the LICENSE file.
+
+PACKAGES := github.com/open-policy-agent/opa/jsonlog/.../ \
+ github.com/open-policy-agent/opa/cmd/.../
+
+BUILD_COMMIT := $(shell ./build/get-build-commit.sh)
+BUILD_TIMESTAMP := $(shell ./build/get-build-timestamp.sh)
+BUILD_HOSTNAME := $(shell ./build/get-build-hostname.sh)
+
+LDFLAGS := -ldflags "-X github.com/open-policy-agent/opa/version.Vcs=$(BUILD_COMMIT) \
+ -X github.com/open-policy-agent/opa/version.Timestamp=$(BUILD_TIMESTAMP) \
+ -X github.com/open-policy-agent/opa/version.Hostname=$(BUILD_HOSTNAME)"
+
+GO := go
+
+GO15VENDOREXPERIMENT := 1
+export GO15VENDOREXPERIMENT
+
+.PHONY: all generate build test clean
+
+all: build test
+
+generate:
+ $(GO) generate
+
+build:
+ $(GO) build -o opa $(LDFLAGS)
+
+test:
+ $(GO) test -v $(PACKAGES)
+
+clean:
+ rm -f ./opa
diff --git a/build/get-build-commit.sh b/build/get-build-commit.sh
new file mode 100755
index 0000000000..5c386553cf
--- /dev/null
+++ b/build/get-build-commit.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+
+GIT_SHA=$(git rev-parse --short HEAD)
+
+if [ -z "$(git status --porcelain 2>/dev/null)" ]; then
+ echo $GIT_SHA
+else
+ echo "$GIT_SHA-dirty"
+fi
diff --git a/build/get-build-hostname.sh b/build/get-build-hostname.sh
new file mode 100755
index 0000000000..0ed5c926ce
--- /dev/null
+++ b/build/get-build-hostname.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+hostname -f
diff --git a/build/get-build-timestamp.sh b/build/get-build-timestamp.sh
new file mode 100755
index 0000000000..8ba5567b6d
--- /dev/null
+++ b/build/get-build-timestamp.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+date -u +"%Y-%m-%dT%H:%M:%SZ"
diff --git a/cmd/commands.go b/cmd/commands.go
new file mode 100644
index 0000000000..f043c9048b
--- /dev/null
+++ b/cmd/commands.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import "github.com/spf13/cobra"
+import "path"
+import "os"
+
+var RootCommand = &cobra.Command{
+ Use: path.Base(os.Args[0]),
+ Short: "Open Policy Agent (OPA)",
+ Long: "An open source project to policy enable any application.",
+}
diff --git a/cmd/version.go b/cmd/version.go
new file mode 100644
index 0000000000..956157d7d9
--- /dev/null
+++ b/cmd/version.go
@@ -0,0 +1,25 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package cmd
+
+import "fmt"
+import "github.com/spf13/cobra"
+import "github.com/open-policy-agent/opa/version"
+
+var versionCommand = &cobra.Command{
+ Use: "version",
+ Short: "Print the version of OPA",
+ Long: "Show version and build information for OPA.",
+ Run: func(cmd *cobra.Command, args []string) {
+ fmt.Println("Version: " + version.Version)
+ fmt.Println("Build Commit: " + version.Vcs)
+ fmt.Println("Build Timestamp: " + version.Timestamp)
+ fmt.Println("Build Hostname: " + version.Hostname)
+ },
+}
+
+func init() {
+ RootCommand.AddCommand(versionCommand)
+}
diff --git a/docs/DEVELOPMENT.md b/docs/DEVELOPMENT.md
new file mode 100644
index 0000000000..a24c705359
--- /dev/null
+++ b/docs/DEVELOPMENT.md
@@ -0,0 +1,112 @@
+# Development
+
+## Environment
+
+OPA is written in the [Go](https://golang.org) programming language.
+
+If you are not familiar with Go we recommend you read through the [How to Write Go
+Code](https://golang.org/doc/code.html) article to familiarize yourself with the standard Go development environment.
+
+Requirements:
+
+- Git
+- GitHub account (if you are contributing)
+- Go (version 1.5.x and 1.6.x are supported)
+- GNU Make
+
+## Getting Started
+
+After cloning the repository, you can run `make all` to build the project and
+execute all of the tests. If this succeeds, there should be a binary
+in the top directory (opa).
+
+Verify the build was successful by running `opa version`.
+
+You can re-build the project with `make build` and execute all of the tests
+with `make test`.
+
+## Workflow
+
+1. Go to [https://github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) and fork the repository
+ into your account by clicking the "Fork" button.
+
+1. Clone the fork to your local machine.
+
+ ```
+ cd $GOPATH
+ mkdir -p src/github.com/open-policy-agent
+ cd src/github.com/open-policy-agent
+ git clone git@github.com:<username>/opa.git opa
+ cd opa
+ git remote add upstream https://github.com/open-policy-agent/opa.git
+ ```
+
+1. Create a branch for your changes.
+
+ ```
+ git checkout -b somefeature
+ ```
+
+1. Update your local branch with upstream.
+
+ ```
+ git fetch upstream
+ git rebase upstream/master
+ ```
+
+1. Develop your changes and regularly update your local branch against upstream.
+
+ - Make sure you run `go fmt` on your code before submitting a Pull Request.
+
+1. Commit changes and push to your fork.
+
+ ```
+ git commit
+ git push origin somefeature
+ ```
+
+1. Submit a Pull Request via https://github.com/<username>/opa. You
+ should be prompted to with a "Compare and Pull Request" button that
+ mentions your branch.
+
+1. Once your Pull Request has been reviewed and signed off please squash your
+ commits. If you have a specific reason to leave multiple commits in the
+ Pull Request, please mention it in the discussion.
+
+ > If you are not familiar with squashing commits, see [the following blog post for a good overview](http://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html).
+
+## Dependencies
+
+[Glide](https://github.com/Masterminds/glide) is a command line tool used for
+dependency management. You must have Glide installed in order to add new
+dependencies or update existing dependencies. If you are not changing
+dependencies you do not have to install Glide, all of the dependencies are
+contained in the vendor directory.
+
+If you need to add a dependency to the project:
+
+1. Run `glide get <package>` to download the package.
+ - This command should be used instead of `go get <package>`.
+ - The package will be stored under the vendor directory.
+ - The glide.yaml file will be updated.
+1. Manually remove the VCS directories (e.g., .git, .hg, etc.) from the new
+ vendor directories.
+1. Commit the changes in glide.yaml, glide.lock, and new vendor directories.
+
+If you need to update the dependencies:
+
+1. Run `glide update --update-vendored`.
+1. Commit the changes to the glide.lock file and any files under the vendor
+ directory.
+
+## Opalog
+
+If you need to modify the Opalog syntax you must update jsonlog/jsonlog.peg
+and run `make generate` to re-generate the parser code.
+
+> If you encounter an error because "pigeon" is not installed, run `glide
+> rebuild` to build and install the vendored dependencies (which include the
+> parser generator). Note, you will need to have [Glide](https://github.com/Masterminds/glide)
+> installed for this.
+
+Commit the changes to the jsonlog.peg and parser.go files.
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
new file mode 100644
index 0000000000..e1b641e036
--- /dev/null
+++ b/docs/RELEASE.md
@@ -0,0 +1,80 @@
+# Release Process
+
+## Overview
+
+The release process consists of three phases: versioning, building, and
+publishing.
+
+Versioning involves maintaining the CHANGELOG.md and version.go files inside
+the repository and tagging the repository to identify specific releases.
+
+Building involves obtaining a copy of the repository, checking out the release
+tag, and building the packages.
+
+Publishing involves creating a new *Release* on GitHub with the relevant
+CHANGELOG.md snippet and uploading the packages from the build phase.
+
+## Versioning
+
+1. Obtain copy of remote repository.
+
+ ```
 git clone git@github.com:open-policy-agent/opa.git
+ ```
+
+1. Edit CHANGELOG.md to update the Unreleased header (e.g., s/Unreleased/0.12.8/) and add any missing items to prepare for release.
+
+1. Edit version/version.go to set Version variable to prepare for release (e.g., s/Version = "0.12.8-dev"/Version = "0.12.8"/).
+
+1. Commit the changes and push to remote repository.
+
+ ```
 git commit -a -m "Prepare v<version> release"
+ git push origin master
+ ```
+
+1. Tag repository with release version and push tags to remote repository.
+
+ ```
 git tag v<version>
+ git push origin --tags
+ ```
+
+1. Edit CHANGELOG.md to add back the Unreleased header to prepare for development.
+
+1. Edit version/version.go to set Version variable to prepare for development (e.g., s/Version = "0.12.8"/Version = "0.12.9-dev"/).
+
+1. Commit the changes and push to remote repository.
+
+ ```
 git commit -a -m "Prepare v<version> development"
+ git push origin master
+ ```
+
+## Building
+
+1. Obtain copy of remote repository.
+
+ ```
 git clone git@github.com:open-policy-agent/opa.git
+ ```
+
+1. Checkout release tag.
+
+ ```
 git checkout v<version>
+ ```
+
+1. Run command to build packages. This will produce a bunch of binaries (e.g., amd64/linux, i386/linux, amd64/darwin, etc.) that can be published (“distributions”).
+
+ ```
+ make dist
+ ```
+
+## Publishing
+
+1. Open browser and go to https://github.com/open-policy-agent/opa/releases
+
+1. Create a new release for the version.
+ - Copy the changelog content into the message.
+ - Upload the distributions packages.
diff --git a/glide.lock b/glide.lock
new file mode 100644
index 0000000000..95a8dfa3b0
--- /dev/null
+++ b/glide.lock
@@ -0,0 +1,77 @@
+hash: 60660dbeea966624ef47c09512b10812b4fd5e9e82876d915ca69d72dbc3157c
+updated: 2016-03-29T17:05:39.985980976-07:00
+imports:
+- name: github.com/armon/consul-api
+ version: dcfedd50ed5334f96adee43fc88518a4f095e15c
+ repo: https://github.com/armon/consul-api
+- name: github.com/BurntSushi/toml
+ version: bbd5bb678321a0d6e58f1099321dfa73391c1b6f
+ repo: https://github.com/BurntSushi/toml
+- name: github.com/coreos/go-etcd
+ version: 003851be7bb0694fe3cc457a49529a19388ee7cf
+ repo: https://github.com/coreos/go-etcd
+- name: github.com/cpuguy83/go-md2man
+ version: 2724a9c9051aa62e9cca11304e7dd518e9e41599
+ repo: https://github.com/cpuguy83/go-md2man
+- name: github.com/hashicorp/hcl
+ version: 2604f3bda7e8960c1be1063709e7d7f0765048d0
+ repo: https://github.com/hashicorp/hcl
+- name: github.com/kr/pretty
+ version: add1dbc86daf0f983cd4a48ceb39deb95c729b67
+ repo: https://github.com/kr/pretty
+- name: github.com/kr/pty
+ version: f7ee69f31298ecbe5d2b349c711e2547a617d398
+ repo: https://github.com/kr/pty
+- name: github.com/kr/text
+ version: bb797dc4fb8320488f47bf11de07a733d7233e1f
+ repo: https://github.com/kr/text
+- name: github.com/magiconair/properties
+ version: c265cfa48dda6474e208715ca93e987829f572f8
+ repo: https://github.com/magiconair/properties
+- name: github.com/mitchellh/mapstructure
+ version: d2dd0262208475919e1a362f675cfc0e7c10e905
+ repo: https://github.com/mitchellh/mapstructure
+- name: github.com/PuerkitoBio/pigeon
+ version: a5221784523de14130c00a8c389148a1b2ad260c
+- name: github.com/russross/blackfriday
+ version: b43df972fb5fdf3af8d2e90f38a69d374fe26dd0
+ repo: https://github.com/russross/blackfriday
+- name: github.com/shurcooL/sanitized_anchor_name
+ version: 10ef21a441db47d8b13ebcc5fd2310f636973c77
+ repo: https://github.com/shurcooL/sanitized_anchor_name
+- name: github.com/spf13/cast
+ version: 27b586b42e29bec072fe7379259cc719e1289da6
+ repo: https://github.com/spf13/cast
+- name: github.com/spf13/cobra
+ version: c678ff029ee250b65714e518f4f5c5cb934955de
+- name: github.com/spf13/jwalterweatherman
+ version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
+ repo: https://github.com/spf13/jwalterweatherman
+- name: github.com/spf13/pflag
+ version: 7f60f83a2c81bc3c3c0d5297f61ddfa68da9d3b7
+- name: github.com/spf13/viper
+ version: c975dc1b4eacf4ec7fdbf0873638de5d090ba323
+ repo: https://github.com/spf13/viper
+- name: github.com/ugorji/go
+ version: a396ed22fc049df733440d90efe17475e3929ccb
+ repo: https://github.com/ugorji/go
+- name: github.com/xordataexchange/crypt
+ version: 749e360c8f236773f28fc6d3ddfce4a470795227
+ repo: https://github.com/xordataexchange/crypt
+- name: golang.org/x/crypto
+ version: 9e7f5dc375abeb9619ea3c5c58502c428f457aa2
+- name: golang.org/x/net
+ version: 31df19d69da8728e9220def59b80ee577c3e48bf
+- name: golang.org/x/text
+ version: 1b466db55e0ba5d56ef5315c728216b42f796491
+- name: golang.org/x/tools
+ version: 84e7bc0dd39bab24b696dde4d714641fa738f945
+ subpackages:
+ - cmd/goimports
+- name: gopkg.in/fsnotify.v1
+ version: 875cf421b32f8f1b31bd43776297876d01542279
+ repo: https://gopkg.in/fsnotify.v1
+- name: gopkg.in/yaml.v2
+ version: a83829b6f1293c91addabc89d0571c246397bbf4
+ repo: https://gopkg.in/yaml.v2
+devImports: []
diff --git a/glide.yaml b/glide.yaml
new file mode 100644
index 0000000000..daa416d80d
--- /dev/null
+++ b/glide.yaml
@@ -0,0 +1,7 @@
+package: github.com/open-policy-agent/opa
+import:
+ - package: github.com/PuerkitoBio/pigeon
+ - package: golang.org/x/tools
+ subpackages:
+ - cmd/goimports
+ - package: github.com/spf13/cobra
diff --git a/install-deps-gen-code.sh b/install-deps-gen-code.sh
deleted file mode 100755
index f0d8b64f51..0000000000
--- a/install-deps-gen-code.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env sh
-
-# install dependencies
-go get -u github.com/PuerkitoBio/pigeon
-go get golang.org/x/tools/cmd/goimports
-
-# generate source code for parser. Delete first so no silent errors.
-rm src/jsonlog/parser.go
-pigeon src/jsonlog/jsonlog.peg | goimports > src/jsonlog/parser.go
-
-
diff --git a/src/jsonlog/jsonlog.peg b/jsonlog/jsonlog.peg
similarity index 100%
rename from src/jsonlog/jsonlog.peg
rename to jsonlog/jsonlog.peg
diff --git a/src/jsonlog/parser.go b/jsonlog/parser.go
similarity index 100%
rename from src/jsonlog/parser.go
rename to jsonlog/parser.go
diff --git a/src/jsonlog/parser_test.go b/jsonlog/parser_test.go
similarity index 100%
rename from src/jsonlog/parser_test.go
rename to jsonlog/parser_test.go
diff --git a/src/jsonlog/syntax.go b/jsonlog/syntax.go
similarity index 100%
rename from src/jsonlog/syntax.go
rename to jsonlog/syntax.go
diff --git a/src/jsonlog/syntax_test.go b/jsonlog/syntax_test.go
similarity index 100%
rename from src/jsonlog/syntax_test.go
rename to jsonlog/syntax_test.go
diff --git a/main.go b/main.go
new file mode 100644
index 0000000000..28db270ed8
--- /dev/null
+++ b/main.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The OPA Authors. All rights reserved.
+// Use of this source code is governed by an Apache2
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+import "os"
+import "github.com/open-policy-agent/opa/cmd"
+
+func main() {
+ if err := cmd.RootCommand.Execute(); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+}
+
+// Opalog parser generation:
+//
+//go:generate pigeon -o jsonlog/parser.go jsonlog/jsonlog.peg
+//go:generate goimports -w jsonlog/parser.go
diff --git a/src/main.go b/src/main.go
deleted file mode 100644
index a1c70da90b..0000000000
--- a/src/main.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package main
-
-import "fmt"
-
-func main() {
- fmt.Println("Hello world")
-}
-
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 0000000000..0cd3800377
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,5 @@
+TAGS
+tags
+.*.swp
+tomlcheck/tomlcheck
+toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
new file mode 100644
index 0000000000..43caf6d021
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+go:
+ - 1.1
+ - 1.2
+ - tip
+install:
+ - go install ./...
+ - go get github.com/BurntSushi/toml-test
+script:
+ - export PATH="$PATH:$HOME/gopath/bin"
+ - make test
+
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 0000000000..21e0938cae
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1,3 @@
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
new file mode 100644
index 0000000000..5a8e332545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
new file mode 100644
index 0000000000..3600848d33
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/Makefile
@@ -0,0 +1,19 @@
+install:
+ go install ./...
+
+test: install
+ go test -v
+ toml-test toml-test-decoder
+ toml-test -encoder toml-test-encoder
+
+fmt:
+ gofmt -w *.go */*.go
+ colcheck *.go */*.go
+
+tags:
+ find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
+
+push:
+ git push origin master
+ git push github master
+
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
new file mode 100644
index 0000000000..5a5df63709
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -0,0 +1,220 @@
+## TOML parser and encoder for Go with reflection
+
+TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
+reflection interface similar to Go's standard library `json` and `xml`
+packages. This package also supports the `encoding.TextUnmarshaler` and
+`encoding.TextMarshaler` interfaces so that you can define custom data
+representations. (There is an example of this below.)
+
+Spec: https://github.com/mojombo/toml
+
+Compatible with TOML version
+[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
+
+Documentation: http://godoc.org/github.com/BurntSushi/toml
+
+Installation:
+
+```bash
+go get github.com/BurntSushi/toml
+```
+
+Try the toml validator:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml)
+
+
+### Testing
+
+This package passes all tests in
+[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
+and the encoder.
+
+### Examples
+
+This package works similarly to how the Go standard library handles `XML`
+and `JSON`. Namely, data is loaded into Go values via reflection.
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+ Age int
+ Cats []string
+ Pi float64
+ Perfection []int
+ DOB time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+ // handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+ ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+ Name string
+ Duration duration
+}
+type songs struct {
+ Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+ log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+ fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+ time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+```
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
+
diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.go b/vendor/github.com/BurntSushi/toml/_examples/example.go
new file mode 100644
index 0000000000..79f31f2758
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/example.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/BurntSushi/toml"
+)
+
+type tomlConfig struct {
+ Title string
+ Owner ownerInfo
+ DB database `toml:"database"`
+ Servers map[string]server
+ Clients clients
+}
+
+type ownerInfo struct {
+ Name string
+ Org string `toml:"organization"`
+ Bio string
+ DOB time.Time
+}
+
+type database struct {
+ Server string
+ Ports []int
+ ConnMax int `toml:"connection_max"`
+ Enabled bool
+}
+
+type server struct {
+ IP string
+ DC string
+}
+
+type clients struct {
+ Data [][]interface{}
+ Hosts []string
+}
+
+func main() {
+ var config tomlConfig
+ if _, err := toml.DecodeFile("example.toml", &config); err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ fmt.Printf("Title: %s\n", config.Title)
+ fmt.Printf("Owner: %s (%s, %s), Born: %s\n",
+ config.Owner.Name, config.Owner.Org, config.Owner.Bio,
+ config.Owner.DOB)
+ fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n",
+ config.DB.Server, config.DB.Ports, config.DB.ConnMax,
+ config.DB.Enabled)
+ for serverName, server := range config.Servers {
+ fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC)
+ }
+ fmt.Printf("Client data: %v\n", config.Clients.Data)
+ fmt.Printf("Client hosts: %v\n", config.Clients.Hosts)
+}
diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.toml b/vendor/github.com/BurntSushi/toml/_examples/example.toml
new file mode 100644
index 0000000000..32c7a4faa4
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/example.toml
@@ -0,0 +1,35 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
diff --git a/vendor/github.com/BurntSushi/toml/_examples/hard.toml b/vendor/github.com/BurntSushi/toml/_examples/hard.toml
new file mode 100644
index 0000000000..26145d2b42
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/hard.toml
@@ -0,0 +1,22 @@
+# Test file for TOML
+# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate
+# This part you'll really hate
+
+[the]
+test_string = "You'll hate me after this - #" # " Annoying, isn't it?
+
+ [the.hard]
+ test_array = [ "] ", " # "] # ] There you go, parse this!
+ test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ]
+ # You didn't think it'd as easy as chucking out the last #, did you?
+ another_test_string = " Same thing, but with a string #"
+ harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too"
+ # Things will get harder
+
+ [the.hard.bit#]
+ what? = "You don't think some user won't do that?"
+ multi_line_array = [
+ "]",
+ # ] Oh yes I did
+ ]
+
diff --git a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml
new file mode 100644
index 0000000000..1dea5ceb44
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml
@@ -0,0 +1,4 @@
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml
new file mode 100644
index 0000000000..74e9e337ed
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml
@@ -0,0 +1,6 @@
+# DO NOT WANT
+[fruit]
+type = "apple"
+
+[fruit.type]
+apple = "yes"
diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml
new file mode 100644
index 0000000000..beb1dba54d
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml
@@ -0,0 +1,35 @@
+# This is an INVALID TOML document. Boom.
+# Can you spot the error without help?
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T7:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+ # You can indent as you please. Tabs or spaces. TOML don't care.
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+ "alpha",
+ "omega"
+]
diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml
new file mode 100644
index 0000000000..3e1261d4c2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml
@@ -0,0 +1,5 @@
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml
new file mode 100644
index 0000000000..b51cd93408
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml
@@ -0,0 +1 @@
+some_key_NAME = "wat"
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
new file mode 100644
index 0000000000..5a8e332545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
new file mode 100644
index 0000000000..24421eb703
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md
@@ -0,0 +1,14 @@
+# Implements the TOML test suite interface
+
+This is an implementation of the interface expected by
+[toml-test](https://github.com/BurntSushi/toml-test) for my
+[toml parser written in Go](https://github.com/BurntSushi/toml).
+In particular, it maps TOML data on `stdin` to a JSON format on `stdout`.
+
+
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
+Compatible with `toml-test` version
+[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
new file mode 100644
index 0000000000..14e7557005
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go
@@ -0,0 +1,90 @@
+// Command toml-test-decoder satisfies the toml-test interface for testing
+// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout.
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "time"
+
+ "github.com/BurntSushi/toml"
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() != 0 {
+ flag.Usage()
+ }
+
+ var tmp interface{}
+ if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil {
+ log.Fatalf("Error decoding TOML: %s", err)
+ }
+
+ typedTmp := translate(tmp)
+ if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil {
+ log.Fatalf("Error encoding JSON: %s", err)
+ }
+}
+
+func translate(tomlData interface{}) interface{} {
+ switch orig := tomlData.(type) {
+ case map[string]interface{}:
+ typed := make(map[string]interface{}, len(orig))
+ for k, v := range orig {
+ typed[k] = translate(v)
+ }
+ return typed
+ case []map[string]interface{}:
+ typed := make([]map[string]interface{}, len(orig))
+ for i, v := range orig {
+ typed[i] = translate(v).(map[string]interface{})
+ }
+ return typed
+ case []interface{}:
+ typed := make([]interface{}, len(orig))
+ for i, v := range orig {
+ typed[i] = translate(v)
+ }
+
+ // We don't really need to tag arrays, but let's be future proof.
+ // (If TOML ever supports tuples, we'll need this.)
+ return tag("array", typed)
+ case time.Time:
+ return tag("datetime", orig.Format("2006-01-02T15:04:05Z"))
+ case bool:
+ return tag("bool", fmt.Sprintf("%v", orig))
+ case int64:
+ return tag("integer", fmt.Sprintf("%d", orig))
+ case float64:
+ return tag("float", fmt.Sprintf("%v", orig))
+ case string:
+ return tag("string", orig)
+ }
+
+ panic(fmt.Sprintf("Unknown type: %T", tomlData))
+}
+
+func tag(typeName string, data interface{}) map[string]interface{} {
+ return map[string]interface{}{
+ "type": typeName,
+ "value": data,
+ }
+}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
new file mode 100644
index 0000000000..5a8e332545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
new file mode 100644
index 0000000000..45a603f298
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md
@@ -0,0 +1,14 @@
+# Implements the TOML test suite interface for TOML encoders
+
+This is an implementation of the interface expected by
+[toml-test](https://github.com/BurntSushi/toml-test) for the
+[TOML encoder](https://github.com/BurntSushi/toml).
+In particular, it maps JSON data on `stdin` to a TOML format on `stdout`.
+
+
+Compatible with TOML version
+[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)
+
+Compatible with `toml-test` version
+[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0)
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
new file mode 100644
index 0000000000..092cc68449
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go
@@ -0,0 +1,131 @@
+// Command toml-test-encoder satisfies the toml-test interface for testing
+// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout.
+package main
+
+import (
+ "encoding/json"
+ "flag"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "time"
+
+ "github.com/BurntSushi/toml"
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() != 0 {
+ flag.Usage()
+ }
+
+ var tmp interface{}
+ if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil {
+ log.Fatalf("Error decoding JSON: %s", err)
+ }
+
+ tomlData := translate(tmp)
+ if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil {
+ log.Fatalf("Error encoding TOML: %s", err)
+ }
+}
+
+func translate(typedJson interface{}) interface{} {
+ switch v := typedJson.(type) {
+ case map[string]interface{}:
+ if len(v) == 2 && in("type", v) && in("value", v) {
+ return untag(v)
+ }
+ m := make(map[string]interface{}, len(v))
+ for k, v2 := range v {
+ m[k] = translate(v2)
+ }
+ return m
+ case []interface{}:
+ tabArray := make([]map[string]interface{}, len(v))
+ for i := range v {
+ if m, ok := translate(v[i]).(map[string]interface{}); ok {
+ tabArray[i] = m
+ } else {
+ log.Fatalf("JSON arrays may only contain objects. This " +
+ "corresponds to only tables being allowed in " +
+ "TOML table arrays.")
+ }
+ }
+ return tabArray
+ }
+ log.Fatalf("Unrecognized JSON format '%T'.", typedJson)
+ panic("unreachable")
+}
+
+func untag(typed map[string]interface{}) interface{} {
+ t := typed["type"].(string)
+ v := typed["value"]
+ switch t {
+ case "string":
+ return v.(string)
+ case "integer":
+ v := v.(string)
+ n, err := strconv.Atoi(v)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as integer: %s", v, err)
+ }
+ return n
+ case "float":
+ v := v.(string)
+ f, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as float64: %s", v, err)
+ }
+ return f
+ case "datetime":
+ v := v.(string)
+ t, err := time.Parse("2006-01-02T15:04:05Z", v)
+ if err != nil {
+ log.Fatalf("Could not parse '%s' as a datetime: %s", v, err)
+ }
+ return t
+ case "bool":
+ v := v.(string)
+ switch v {
+ case "true":
+ return true
+ case "false":
+ return false
+ }
+ log.Fatalf("Could not parse '%s' as a boolean.", v)
+ case "array":
+ v := v.([]interface{})
+ array := make([]interface{}, len(v))
+ for i := range v {
+ if m, ok := v[i].(map[string]interface{}); ok {
+ array[i] = untag(m)
+ } else {
+ log.Fatalf("Arrays may only contain other arrays or "+
+ "primitive values, but found a '%T'.", m)
+ }
+ }
+ return array
+ }
+ log.Fatalf("Unrecognized tag type '%s'.", t)
+ panic("unreachable")
+}
+
+func in(key string, m map[string]interface{}) bool {
+ _, ok := m[key]
+ return ok
+}
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
new file mode 100644
index 0000000000..5a8e332545
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
new file mode 100644
index 0000000000..5df0dc32bb
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md
@@ -0,0 +1,22 @@
+# TOML Validator
+
+If Go is installed, it's simple to try it out:
+
+```bash
+go get github.com/BurntSushi/toml/cmd/tomlv
+tomlv some-toml-file.toml
+```
+
+You can see the types of every key in a TOML file with:
+
+```bash
+tomlv -types some-toml-file.toml
+```
+
+At the moment, only one error message is reported at a time. Error messages
+include line numbers. No output means that the files given are valid TOML, or
+there is a bug in `tomlv`.
+
+Compatible with TOML version
+[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md)
+
diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
new file mode 100644
index 0000000000..c7d689a7e9
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go
@@ -0,0 +1,61 @@
+// Command tomlv validates TOML documents and prints each key's type.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "path"
+ "strings"
+ "text/tabwriter"
+
+ "github.com/BurntSushi/toml"
+)
+
+var (
+ flagTypes = false
+)
+
+func init() {
+ log.SetFlags(0)
+
+ flag.BoolVar(&flagTypes, "types", flagTypes,
+ "When set, the types of every defined key will be shown.")
+
+ flag.Usage = usage
+ flag.Parse()
+}
+
+func usage() {
+ log.Printf("Usage: %s toml-file [ toml-file ... ]\n",
+ path.Base(os.Args[0]))
+ flag.PrintDefaults()
+
+ os.Exit(1)
+}
+
+func main() {
+ if flag.NArg() < 1 {
+ flag.Usage()
+ }
+ for _, f := range flag.Args() {
+ var tmp interface{}
+ md, err := toml.DecodeFile(f, &tmp)
+ if err != nil {
+ log.Fatalf("Error in '%s': %s", f, err)
+ }
+ if flagTypes {
+ printTypes(md)
+ }
+ }
+}
+
+func printTypes(md toml.MetaData) {
+ tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0)
+ for _, key := range md.Keys() {
+ fmt.Fprintf(tabw, "%s%s\t%s\n",
+ strings.Repeat(" ", len(key)-1), key, md.Type(key...))
+ }
+ tabw.Flush()
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
new file mode 100644
index 0000000000..c26b00c014
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -0,0 +1,505 @@
+package toml
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "strings"
+ "time"
+)
+
+var e = fmt.Errorf
+
+// Unmarshaler is the interface implemented by objects that can unmarshal a
+// TOML description of themselves.
+type Unmarshaler interface {
+ UnmarshalTOML(interface{}) error
+}
+
+// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
+func Unmarshal(p []byte, v interface{}) error {
+ _, err := Decode(string(p), v)
+ return err
+}
+
+// Primitive is a TOML value that hasn't been decoded into a Go value.
+// When using the various `Decode*` functions, the type `Primitive` may
+// be given to any value, and its decoding will be delayed.
+//
+// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
+//
+// The underlying representation of a `Primitive` value is subject to change.
+// Do not rely on it.
+//
+// N.B. Primitive values are still parsed, so using them will only avoid
+// the overhead of reflection. They can be useful when you don't know the
+// exact type of TOML data until run time.
+type Primitive struct {
+ undecoded interface{}
+ context Key
+}
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md := MetaData{decoded: make(map[string]bool)}
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// PrimitiveDecode is just like the other `Decode*` functions, except it
+// decodes a TOML value that has already been parsed. Valid primitive values
+// can *only* be obtained from values filled by the decoder functions,
+// including this method. (i.e., `v` may contain more `Primitive`
+// values.)
+//
+// Meta data for primitive values is included in the meta data returned by
+// the `Decode*` functions with one exception: keys returned by the Undecoded
+// method will only reflect keys that were decoded. Namely, any keys hidden
+// behind a Primitive will be considered undecoded. Executing this method will
+// update the undecoded keys in the meta data. (See the example.)
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+ md.context = primValue.context
+ defer func() { md.context = nil }()
+ return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// Decode will decode the contents of `data` in TOML format into a pointer
+// `v`.
+//
+// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
+// used interchangeably.)
+//
+// TOML arrays of tables correspond to either a slice of structs or a slice
+// of maps.
+//
+// TOML datetimes correspond to Go `time.Time` values.
+//
+// All other TOML types (float, string, int, bool and array) correspond
+// to the obvious Go types.
+//
+// An exception to the above rules is if a type implements the
+// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
+// (floats, strings, integers, booleans and datetimes) will be converted to
+// a byte string and given to the value's UnmarshalText method. See the
+// Unmarshaler example for a demonstration with time duration strings.
+//
+// Key mapping
+//
+// TOML keys can map to either keys in a Go map or field names in a Go
+// struct. The special `toml` struct tag may be used to map TOML keys to
+// struct fields that don't match the key name exactly. (See the example.)
+// A case insensitive match to struct names will be tried if an exact match
+// can't be found.
+//
+// The mapping between TOML values and Go values is loose. That is, there
+// may exist TOML values that cannot be placed into your representation, and
+// there may be parts of your representation that do not correspond to
+// TOML values. This loose mapping can be made stricter by using the IsDefined
+// and/or Undecoded methods on the MetaData returned.
+//
+// This decoder will not handle cyclic types. If a cyclic type is passed,
+// `Decode` will not terminate.
+func Decode(data string, v interface{}) (MetaData, error) {
+ p, err := parse(data)
+ if err != nil {
+ return MetaData{}, err
+ }
+ md := MetaData{
+ p.mapping, p.types, p.ordered,
+ make(map[string]bool, len(p.ordered)), nil,
+ }
+ return md, md.unify(p.mapping, rvalue(v))
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at `fpath` and decode it for you.
+func DecodeFile(fpath string, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadFile(fpath)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// DecodeReader is just like Decode, except it will consume all bytes
+// from the reader and decode it for you.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+ bs, err := ioutil.ReadAll(r)
+ if err != nil {
+ return MetaData{}, err
+ }
+ return Decode(string(bs), v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+
+ // Special case. Look for a `Primitive` value.
+ if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+ // Save the undecoded data and the key context into the primitive
+ // value.
+ context := make(Key, len(md.context))
+ copy(context, md.context)
+ rv.Set(reflect.ValueOf(Primitive{
+ undecoded: data,
+ context: context,
+ }))
+ return nil
+ }
+
+ // Special case. Unmarshaler Interface support.
+ if rv.CanAddr() {
+ if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+ return v.UnmarshalTOML(data)
+ }
+ }
+
+ // Special case. Handle time.Time values specifically.
+ // TODO: Remove this code when we decide to drop support for Go 1.1.
+ // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
+ // interfaces.
+ if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
+ return md.unifyDatetime(data, rv)
+ }
+
+ // Special case. Look for a value satisfying the TextUnmarshaler interface.
+ if v, ok := rv.Interface().(TextUnmarshaler); ok {
+ return md.unifyText(data, v)
+ }
+ // BUG(burntsushi)
+ // The behavior here is incorrect whenever a Go type satisfies the
+ // encoding.TextUnmarshaler interface but also corresponds to a TOML
+ // hash or array. In particular, the unmarshaler should only be applied
+ // to primitive TOML values. But at this point, it will be applied to
+ // all kinds of values and produce an incorrect error whenever those values
+ // are hashes or arrays (including arrays of tables).
+
+ k := rv.Kind()
+
+ // laziness
+ if k >= reflect.Int && k <= reflect.Uint64 {
+ return md.unifyInt(data, rv)
+ }
+ switch k {
+ case reflect.Ptr:
+ elem := reflect.New(rv.Type().Elem())
+ err := md.unify(data, reflect.Indirect(elem))
+ if err != nil {
+ return err
+ }
+ rv.Set(elem)
+ return nil
+ case reflect.Struct:
+ return md.unifyStruct(data, rv)
+ case reflect.Map:
+ return md.unifyMap(data, rv)
+ case reflect.Array:
+ return md.unifyArray(data, rv)
+ case reflect.Slice:
+ return md.unifySlice(data, rv)
+ case reflect.String:
+ return md.unifyString(data, rv)
+ case reflect.Bool:
+ return md.unifyBool(data, rv)
+ case reflect.Interface:
+ // we only support empty interfaces.
+ if rv.NumMethod() > 0 {
+ return e("Unsupported type '%s'.", rv.Kind())
+ }
+ return md.unifyAnything(data, rv)
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ return md.unifyFloat64(data, rv)
+ }
+ return e("Unsupported type '%s'.", rv.Kind())
+}
+
+func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if mapping == nil {
+ return nil
+ }
+ return mismatch(rv, "map", mapping)
+ }
+
+ for key, datum := range tmap {
+ var f *field
+ fields := cachedTypeFields(rv.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv := rv
+ for _, i := range f.index {
+ subv = indirect(subv.Field(i))
+ }
+ if isUnifiable(subv) {
+ md.decoded[md.context.add(key).String()] = true
+ md.context = append(md.context, key)
+ if err := md.unify(datum, subv); err != nil {
+ return e("Type mismatch for '%s.%s': %s",
+ rv.Type().String(), f.name, err)
+ }
+ md.context = md.context[0 : len(md.context)-1]
+ } else if f.name != "" {
+ // Bad user! No soup for you!
+ return e("Field '%s.%s' is unexported, and therefore cannot "+
+ "be loaded with reflection.", rv.Type().String(), f.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]interface{})
+ if !ok {
+ if tmap == nil {
+ return nil
+ }
+ return badtype("map", mapping)
+ }
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ for k, v := range tmap {
+ md.decoded[md.context.add(k).String()] = true
+ md.context = append(md.context, k)
+
+ rvkey := indirect(reflect.New(rv.Type().Key()))
+ rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
+ if err := md.unify(v, rvval); err != nil {
+ return err
+ }
+ md.context = md.context[0 : len(md.context)-1]
+
+ rvkey.SetString(k)
+ rv.SetMapIndex(rvkey, rvval)
+ }
+ return nil
+}
+
+func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ sliceLen := datav.Len()
+ if sliceLen != rv.Len() {
+ return e("expected array length %d; got TOML array of length %d",
+ rv.Len(), sliceLen)
+ }
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+ datav := reflect.ValueOf(data)
+ if datav.Kind() != reflect.Slice {
+ if !datav.IsValid() {
+ return nil
+ }
+ return badtype("slice", data)
+ }
+ n := datav.Len()
+ if rv.IsNil() || rv.Cap() < n {
+ rv.Set(reflect.MakeSlice(rv.Type(), n, n))
+ }
+ rv.SetLen(n)
+ return md.unifySliceArray(datav, rv)
+}
+
+func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
+ sliceLen := data.Len()
+ for i := 0; i < sliceLen; i++ {
+ v := data.Index(i).Interface()
+ sliceval := indirect(rv.Index(i))
+ if err := md.unify(v, sliceval); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
+ if _, ok := data.(time.Time); ok {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+ }
+ return badtype("time.Time", data)
+}
+
+func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+ if s, ok := data.(string); ok {
+ rv.SetString(s)
+ return nil
+ }
+ return badtype("string", data)
+}
+
+func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(float64); ok {
+ switch rv.Kind() {
+ case reflect.Float32:
+ fallthrough
+ case reflect.Float64:
+ rv.SetFloat(num)
+ default:
+ panic("bug")
+ }
+ return nil
+ }
+ return badtype("float", data)
+}
+
+func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+ if num, ok := data.(int64); ok {
+ if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
+ switch rv.Kind() {
+ case reflect.Int, reflect.Int64:
+ // No bounds checking necessary.
+ case reflect.Int8:
+ if num < math.MinInt8 || num > math.MaxInt8 {
+ return e("Value '%d' is out of range for int8.", num)
+ }
+ case reflect.Int16:
+ if num < math.MinInt16 || num > math.MaxInt16 {
+ return e("Value '%d' is out of range for int16.", num)
+ }
+ case reflect.Int32:
+ if num < math.MinInt32 || num > math.MaxInt32 {
+ return e("Value '%d' is out of range for int32.", num)
+ }
+ }
+ rv.SetInt(num)
+ } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
+ unum := uint64(num)
+ switch rv.Kind() {
+ case reflect.Uint, reflect.Uint64:
+ // No bounds checking necessary.
+ case reflect.Uint8:
+ if num < 0 || unum > math.MaxUint8 {
+ return e("Value '%d' is out of range for uint8.", num)
+ }
+ case reflect.Uint16:
+ if num < 0 || unum > math.MaxUint16 {
+ return e("Value '%d' is out of range for uint16.", num)
+ }
+ case reflect.Uint32:
+ if num < 0 || unum > math.MaxUint32 {
+ return e("Value '%d' is out of range for uint32.", num)
+ }
+ }
+ rv.SetUint(unum)
+ } else {
+ panic("unreachable")
+ }
+ return nil
+ }
+ return badtype("integer", data)
+}
+
+func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+ if b, ok := data.(bool); ok {
+ rv.SetBool(b)
+ return nil
+ }
+ return badtype("boolean", data)
+}
+
+func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+ rv.Set(reflect.ValueOf(data))
+ return nil
+}
+
+func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
+ var s string
+ switch sdata := data.(type) {
+ case TextMarshaler:
+ text, err := sdata.MarshalText()
+ if err != nil {
+ return err
+ }
+ s = string(text)
+ case fmt.Stringer:
+ s = sdata.String()
+ case string:
+ s = sdata
+ case bool:
+ s = fmt.Sprintf("%v", sdata)
+ case int64:
+ s = fmt.Sprintf("%d", sdata)
+ case float64:
+ s = fmt.Sprintf("%f", sdata)
+ default:
+ return badtype("primitive (string-like)", data)
+ }
+ if err := v.UnmarshalText([]byte(s)); err != nil {
+ return err
+ }
+ return nil
+}
+
+// rvalue returns a reflect.Value of `v`. All pointers are resolved.
+func rvalue(v interface{}) reflect.Value {
+ return indirect(reflect.ValueOf(v))
+}
+
+// indirect returns the value pointed to by a pointer.
+// Pointers are followed until the value is not a pointer.
+// New values are allocated for each nil pointer.
+//
+// An exception to this rule is if the value satisfies an interface of
+// interest to us (like encoding.TextUnmarshaler).
+func indirect(v reflect.Value) reflect.Value {
+ if v.Kind() != reflect.Ptr {
+ if v.CanAddr() {
+ pv := v.Addr()
+ if _, ok := pv.Interface().(TextUnmarshaler); ok {
+ return pv
+ }
+ }
+ return v
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ return indirect(reflect.Indirect(v))
+}
+
+func isUnifiable(rv reflect.Value) bool {
+ if rv.CanSet() {
+ return true
+ }
+ if _, ok := rv.Interface().(TextUnmarshaler); ok {
+ return true
+ }
+ return false
+}
+
+func badtype(expected string, data interface{}) error {
+ return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+ return e("Type mismatch for %s. Expected %s but found '%T'.",
+ user.Type().String(), expected, data)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000000..ef6f545fa1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,122 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not
+// be inferrable via reflection. In particular, whether a key has been defined
+// and the TOML type of a key.
+type MetaData struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ keys []Key
+ decoded map[string]bool
+ context Key // Used only during decoding.
+}
+
+// IsDefined returns true if the key given exists in the TOML data. The key
+// should be specified hierarchially. e.g.,
+//
+// // access the TOML key 'a.b.c'
+// IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+ if len(key) == 0 {
+ return false
+ }
+
+ var hash map[string]interface{}
+ var ok bool
+ var hashOrVal interface{} = md.mapping
+ for _, k := range key {
+ if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ return false
+ }
+ if hashOrVal, ok = hash[k]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that
+// does not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+ fullkey := strings.Join(key, ".")
+ if typ, ok := md.types[fullkey]; ok {
+ return typ.typeString()
+ }
+ return ""
+}
+
+// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
+// to get values of this type.
+type Key []string
+
+func (k Key) String() string {
+ return strings.Join(k, ".")
+}
+
+func (k Key) maybeQuotedAll() string {
+ var ss []string
+ for i := range k {
+ ss = append(ss, k.maybeQuoted(i))
+ }
+ return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+ quote := false
+ for _, c := range k[i] {
+ if !isBareKeyChar(c) {
+ quote = true
+ break
+ }
+ }
+ if quote {
+ return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+ } else {
+ return k[i]
+ }
+}
+
+func (k Key) add(piece string) Key {
+ newKey := make(Key, len(k)+1)
+ copy(newKey, k)
+ newKey[len(k)] = piece
+ return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific.
+//
+// The list will have the same order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+ return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+ undecoded := make([]Key, 0, len(md.keys))
+ for _, key := range md.keys {
+ if !md.decoded[key.String()] {
+ undecoded = append(undecoded, key)
+ }
+ }
+ return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go
new file mode 100644
index 0000000000..213e70dcaf
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_test.go
@@ -0,0 +1,1092 @@
+package toml
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func init() {
+ log.SetFlags(0)
+}
+
+func TestDecodeSimple(t *testing.T) {
+ var testSimple = `
+age = 250
+andrew = "gallant"
+kait = "brady"
+now = 1987-07-05T05:45:00Z
+yesOrNo = true
+pi = 3.14
+colors = [
+ ["red", "green", "blue"],
+ ["cyan", "magenta", "yellow", "black"],
+]
+
+[My.Cats]
+plato = "cat 1"
+cauchy = "cat 2"
+`
+
+ type cats struct {
+ Plato string
+ Cauchy string
+ }
+ type simple struct {
+ Age int
+ Colors [][]string
+ Pi float64
+ YesOrNo bool
+ Now time.Time
+ Andrew string
+ Kait string
+ My map[string]cats
+ }
+
+ var val simple
+ _, err := Decode(testSimple, &val)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
+ if err != nil {
+ panic(err)
+ }
+ var answer = simple{
+ Age: 250,
+ Andrew: "gallant",
+ Kait: "brady",
+ Now: now,
+ YesOrNo: true,
+ Pi: 3.14,
+ Colors: [][]string{
+ {"red", "green", "blue"},
+ {"cyan", "magenta", "yellow", "black"},
+ },
+ My: map[string]cats{
+ "Cats": {Plato: "cat 1", Cauchy: "cat 2"},
+ },
+ }
+ if !reflect.DeepEqual(val, answer) {
+ t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
+ answer, val)
+ }
+}
+
+func TestDecodeEmbedded(t *testing.T) {
+ type Dog struct{ Name string }
+ type Age int
+
+ tests := map[string]struct {
+ input string
+ decodeInto interface{}
+ wantDecoded interface{}
+ }{
+ "embedded struct": {
+ input: `Name = "milton"`,
+ decodeInto: &struct{ Dog }{},
+ wantDecoded: &struct{ Dog }{Dog{"milton"}},
+ },
+ "embedded non-nil pointer to struct": {
+ input: `Name = "milton"`,
+ decodeInto: &struct{ *Dog }{},
+ wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
+ },
+ "embedded nil pointer to struct": {
+ input: ``,
+ decodeInto: &struct{ *Dog }{},
+ wantDecoded: &struct{ *Dog }{nil},
+ },
+ "embedded int": {
+ input: `Age = -5`,
+ decodeInto: &struct{ Age }{},
+ wantDecoded: &struct{ Age }{-5},
+ },
+ }
+
+ for label, test := range tests {
+ _, err := Decode(test.input, test.decodeInto)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
+ t.Errorf("%s: want decoded == %+v, got %+v",
+ label, test.wantDecoded, test.decodeInto)
+ }
+ }
+}
+
+func TestDecodeIgnoredFields(t *testing.T) {
+ type simple struct {
+ Number int `toml:"-"`
+ }
+ const input = `
+Number = 123
+- = 234
+`
+ var s simple
+ if _, err := Decode(input, &s); err != nil {
+ t.Fatal(err)
+ }
+ if s.Number != 0 {
+ t.Errorf("got: %d; want 0", s.Number)
+ }
+}
+
+func TestTableArrays(t *testing.T) {
+ var tomlTableArrays = `
+[[albums]]
+name = "Born to Run"
+
+ [[albums.songs]]
+ name = "Jungleland"
+
+ [[albums.songs]]
+ name = "Meeting Across the River"
+
+[[albums]]
+name = "Born in the USA"
+
+ [[albums.songs]]
+ name = "Glory Days"
+
+ [[albums.songs]]
+ name = "Dancing in the Dark"
+`
+
+ type Song struct {
+ Name string
+ }
+
+ type Album struct {
+ Name string
+ Songs []Song
+ }
+
+ type Music struct {
+ Albums []Album
+ }
+
+ expected := Music{[]Album{
+ {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
+ {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
+ }}
+ var got Music
+ if _, err := Decode(tomlTableArrays, &got); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, got) {
+ t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
+ }
+}
+
+// Case insensitive matching tests.
+// A bit more comprehensive than needed given the current implementation,
+// but implementations change.
+// Probably still missing demonstrations of some ugly corner cases regarding
+// case insensitive matching and multiple fields.
+func TestCase(t *testing.T) {
+ var caseToml = `
+tOpString = "string"
+tOpInt = 1
+tOpFloat = 1.1
+tOpBool = true
+tOpdate = 2006-01-02T15:04:05Z
+tOparray = [ "array" ]
+Match = "i should be in Match only"
+MatcH = "i should be in MatcH only"
+once = "just once"
+[nEst.eD]
+nEstedString = "another string"
+`
+
+ type InsensitiveEd struct {
+ NestedString string
+ }
+
+ type InsensitiveNest struct {
+ Ed InsensitiveEd
+ }
+
+ type Insensitive struct {
+ TopString string
+ TopInt int
+ TopFloat float64
+ TopBool bool
+ TopDate time.Time
+ TopArray []string
+ Match string
+ MatcH string
+ Once string
+ OncE string
+ Nest InsensitiveNest
+ }
+
+ tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
+ if err != nil {
+ panic(err)
+ }
+ expected := Insensitive{
+ TopString: "string",
+ TopInt: 1,
+ TopFloat: 1.1,
+ TopBool: true,
+ TopDate: tme,
+ TopArray: []string{"array"},
+ MatcH: "i should be in MatcH only",
+ Match: "i should be in Match only",
+ Once: "just once",
+ OncE: "",
+ Nest: InsensitiveNest{
+ Ed: InsensitiveEd{NestedString: "another string"},
+ },
+ }
+ var got Insensitive
+ if _, err := Decode(caseToml, &got); err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(expected, got) {
+ t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
+ }
+}
+
+func TestPointers(t *testing.T) {
+ type Object struct {
+ Type string
+ Description string
+ }
+
+ type Dict struct {
+ NamedObject map[string]*Object
+ BaseObject *Object
+ Strptr *string
+ Strptrs []*string
+ }
+ s1, s2, s3 := "blah", "abc", "def"
+ expected := &Dict{
+ Strptr: &s1,
+ Strptrs: []*string{&s2, &s3},
+ NamedObject: map[string]*Object{
+ "foo": {"FOO", "fooooo!!!"},
+ "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
+ },
+ BaseObject: &Object{"BASE", "da base"},
+ }
+
+ ex1 := `
+Strptr = "blah"
+Strptrs = ["abc", "def"]
+
+[NamedObject.foo]
+Type = "FOO"
+Description = "fooooo!!!"
+
+[NamedObject.bar]
+Type = "BAR"
+Description = "ba-ba-ba-ba-barrrr!!!"
+
+[BaseObject]
+Type = "BASE"
+Description = "da base"
+`
+ dict := new(Dict)
+ _, err := Decode(ex1, dict)
+ if err != nil {
+ t.Errorf("Decode error: %v", err)
+ }
+ if !reflect.DeepEqual(expected, dict) {
+ t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
+ }
+}
+
+// TestDecodeBadTimestamp checks that malformed datetimes (wrong type,
+// out-of-range day, missing timezone) are rejected with an error.
+func TestDecodeBadTimestamp(t *testing.T) {
+	var x struct {
+		T time.Time
+	}
+	for _, s := range []string{
+		"T = 123", "T = 2006-01-50T00:00:00Z", "T = 2006-01-30T00:00:00",
+	} {
+		if _, err := Decode(s, &x); err == nil {
+			t.Errorf("Expected invalid DateTime error for %q", s)
+		}
+	}
+}
+
+// TestDecodeMultilineStrings checks two multi-line basic string rules:
+// escape sequences (\n) are honored inside the body, and a trailing
+// backslash suppresses the following newline (line-ending backslash).
+func TestDecodeMultilineStrings(t *testing.T) {
+	var x struct {
+		S string
+	}
+	const s0 = `s = """
+a b \n c
+d e f
+"""`
+	if _, err := Decode(s0, &x); err != nil {
+		t.Fatal(err)
+	}
+	if want := "a b \n c\nd e f\n"; x.S != want {
+		t.Errorf("got: %q; want: %q", x.S, want)
+	}
+	const s1 = `s = """a b c\
+"""`
+	if _, err := Decode(s1, &x); err != nil {
+		t.Fatal(err)
+	}
+	if want := "a b c"; x.S != want {
+		t.Errorf("got: %q; want: %q", x.S, want)
+	}
+}
+
+// sphere is a fixture for the fixed-size array decoding tests below.
+type sphere struct {
+	Center [3]float64
+	Radius float64
+}
+
+// TestDecodeSimpleArray checks that a TOML array decodes into a Go array
+// field of matching length.
+func TestDecodeSimpleArray(t *testing.T) {
+	var s1 sphere
+	if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestDecodeArrayWrongSize checks that decoding a 2-element TOML array into
+// a [3]float64 field fails rather than silently truncating/zero-filling.
+func TestDecodeArrayWrongSize(t *testing.T) {
+	var s1 sphere
+	if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
+		t.Fatal("Expected array type mismatch error")
+	}
+}
+
+// TestDecodeLargeIntoSmallInt checks that an integer that overflows the
+// destination type (500 into int8) is reported as an error.
+func TestDecodeLargeIntoSmallInt(t *testing.T) {
+	type table struct {
+		Value int8
+	}
+	var tab table
+	if _, err := Decode(`value = 500`, &tab); err == nil {
+		t.Fatal("Expected integer out-of-bounds error.")
+	}
+}
+
+// TestDecodeSizedInts checks that every sized integer kind (and plain
+// int/uint) can be a decode target, with case-insensitive key matching.
+func TestDecodeSizedInts(t *testing.T) {
+	type table struct {
+		U8  uint8
+		U16 uint16
+		U32 uint32
+		U64 uint64
+		U   uint
+		I8  int8
+		I16 int16
+		I32 int32
+		I64 int64
+		I   int
+	}
+	answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
+	toml := `
+	u8 = 1
+	u16 = 1
+	u32 = 1
+	u64 = 1
+	u = 1
+	i8 = -1
+	i16 = -1
+	i32 = -1
+	i64 = -1
+	i = -1
+	`
+	var tab table
+	if _, err := Decode(toml, &tab); err != nil {
+		t.Fatal(err.Error())
+	}
+	if answer != tab {
+		t.Fatalf("Expected %#v but got %#v", answer, tab)
+	}
+}
+
+// TestUnmarshaler checks that a type implementing UnmarshalTOML (menu/dish
+// below) receives the raw decoded document and can map it itself, both when
+// Decode is given a pointer it created and a pointer to a stack value.
+func TestUnmarshaler(t *testing.T) {
+
+	var tomlBlob = `
+[dishes.hamboogie]
+name = "Hamboogie with fries"
+price = 10.99
+
+[[dishes.hamboogie.ingredients]]
+name = "Bread Bun"
+
+[[dishes.hamboogie.ingredients]]
+name = "Lettuce"
+
+[[dishes.hamboogie.ingredients]]
+name = "Real Beef Patty"
+
+[[dishes.hamboogie.ingredients]]
+name = "Tomato"
+
+[dishes.eggsalad]
+name = "Egg Salad with rice"
+price = 3.99
+
+[[dishes.eggsalad.ingredients]]
+name = "Egg"
+
+[[dishes.eggsalad.ingredients]]
+name = "Mayo"
+
+[[dishes.eggsalad.ingredients]]
+name = "Rice"
+`
+	m := &menu{}
+	// Use t.Fatal, not log.Fatal: log.Fatal calls os.Exit and would abort
+	// the entire test binary, skipping every other test and any cleanup.
+	if _, err := Decode(tomlBlob, m); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(m.Dishes) != 2 {
+		t.Log("two dishes should be loaded with UnmarshalTOML()")
+		t.Errorf("expected %d but got %d", 2, len(m.Dishes))
+	}
+
+	eggSalad := m.Dishes["eggsalad"]
+	if _, ok := interface{}(eggSalad).(dish); !ok {
+		t.Errorf("expected a dish")
+	}
+
+	if eggSalad.Name != "Egg Salad with rice" {
+		t.Errorf("expected the dish to be named 'Egg Salad with rice'")
+	}
+
+	if len(eggSalad.Ingredients) != 3 {
+		t.Log("dish should be loaded with UnmarshalTOML()")
+		t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients))
+	}
+
+	found := false
+	for _, i := range eggSalad.Ingredients {
+		if i.Name == "Rice" {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Error("Rice was not loaded in UnmarshalTOML()")
+	}
+
+	// test on a value - must be passed as *
+	o := menu{}
+	if _, err := Decode(tomlBlob, &o); err != nil {
+		t.Fatal(err)
+	}
+
+}
+
+// menu is a fixture type for TestUnmarshaler; it implements UnmarshalTOML.
+type menu struct {
+	Dishes map[string]dish
+}
+
+// UnmarshalTOML implements custom unmarshalling for menu. The decoder hands
+// the raw document as a map[string]interface{}; each entry under "dishes"
+// is delegated to dish.UnmarshalTOML.
+func (m *menu) UnmarshalTOML(p interface{}) error {
+	m.Dishes = make(map[string]dish)
+	data, _ := p.(map[string]interface{})
+	// NOTE(review): assumes a "dishes" table is present — the unchecked
+	// assertion panics on a document without it. Fine for this test fixture.
+	dishes := data["dishes"].(map[string]interface{})
+	for n, v := range dishes {
+		if d, ok := v.(map[string]interface{}); ok {
+			nd := dish{}
+			nd.UnmarshalTOML(d)
+			m.Dishes[n] = nd
+		} else {
+			return fmt.Errorf("not a dish")
+		}
+	}
+	return nil
+}
+
+// dish is a fixture type for TestUnmarshaler; it implements UnmarshalTOML.
+type dish struct {
+	Name        string
+	Price       float32
+	Ingredients []ingredient
+}
+
+// UnmarshalTOML implements custom unmarshalling for dish. The decoder hands
+// each dish table to this method as a map[string]interface{}.
+func (d *dish) UnmarshalTOML(p interface{}) error {
+	data, _ := p.(map[string]interface{})
+	d.Name, _ = data["name"].(string)
+	// TOML floats decode as float64, never float32, so the previous
+	// assertion data["price"].(float32) always failed and left Price at 0.
+	// Assert float64 and narrow explicitly.
+	if price, ok := data["price"].(float64); ok {
+		d.Price = float32(price)
+	}
+	ingredients, _ := data["ingredients"].([]map[string]interface{})
+	for _, e := range ingredients {
+		// e is already a map[string]interface{}; the old round-trip through
+		// interface{} and back was a no-op assertion.
+		name, _ := e["name"].(string)
+		d.Ingredients = append(d.Ingredients, ingredient{name})
+	}
+	return nil
+}
+
+// ingredient is a single named component of a dish (see TestUnmarshaler).
+type ingredient struct {
+	Name string
+}
+
+// TestDecodeSlices checks slice-field semantics: an absent key leaves the
+// existing slice untouched, "S = []" resets it to empty, and a non-empty
+// array replaces any prior contents.
+func TestDecodeSlices(t *testing.T) {
+	type T struct {
+		S []string
+	}
+	for i, tt := range []struct {
+		v     T
+		input string
+		want  T
+	}{
+		{T{}, "", T{}},
+		{T{[]string{}}, "", T{[]string{}}},
+		{T{[]string{"a", "b"}}, "", T{[]string{"a", "b"}}},
+		{T{}, "S = []", T{[]string{}}},
+		{T{[]string{}}, "S = []", T{[]string{}}},
+		{T{[]string{"a", "b"}}, "S = []", T{[]string{}}},
+		{T{}, `S = ["x"]`, T{[]string{"x"}}},
+		{T{[]string{}}, `S = ["x"]`, T{[]string{"x"}}},
+		{T{[]string{"a", "b"}}, `S = ["x"]`, T{[]string{"x"}}},
+	} {
+		if _, err := Decode(tt.input, &tt.v); err != nil {
+			t.Errorf("[%d] %s", i, err)
+			continue
+		}
+		if !reflect.DeepEqual(tt.v, tt.want) {
+			t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want)
+		}
+	}
+}
+
+// TestDecodePrimitive checks PrimitiveDecode semantics into pre-populated
+// destinations: absent keys leave slices/arrays/maps/structs untouched,
+// present arrays replace slices, and present tables merge into maps.
+func TestDecodePrimitive(t *testing.T) {
+	type S struct {
+		P Primitive
+	}
+	type T struct {
+		S []int
+	}
+	slicep := func(s []int) *[]int { return &s }
+	arrayp := func(a [2]int) *[2]int { return &a }
+	mapp := func(m map[string]int) *map[string]int { return &m }
+	for i, tt := range []struct {
+		v     interface{}
+		input string
+		want  interface{}
+	}{
+		// slices
+		{slicep(nil), "", slicep(nil)},
+		{slicep([]int{}), "", slicep([]int{})},
+		{slicep([]int{1, 2, 3}), "", slicep([]int{1, 2, 3})},
+		{slicep(nil), "P = [1,2]", slicep([]int{1, 2})},
+		{slicep([]int{}), "P = [1,2]", slicep([]int{1, 2})},
+		{slicep([]int{1, 2, 3}), "P = [1,2]", slicep([]int{1, 2})},
+
+		// arrays
+		{arrayp([2]int{2, 3}), "", arrayp([2]int{2, 3})},
+		{arrayp([2]int{2, 3}), "P = [3,4]", arrayp([2]int{3, 4})},
+
+		// maps
+		{mapp(nil), "", mapp(nil)},
+		{mapp(map[string]int{}), "", mapp(map[string]int{})},
+		{mapp(map[string]int{"a": 1}), "", mapp(map[string]int{"a": 1})},
+		{mapp(nil), "[P]\na = 2", mapp(map[string]int{"a": 2})},
+		{mapp(map[string]int{}), "[P]\na = 2", mapp(map[string]int{"a": 2})},
+		{mapp(map[string]int{"a": 1, "b": 3}), "[P]\na = 2", mapp(map[string]int{"a": 2, "b": 3})},
+
+		// structs
+		{&T{nil}, "[P]", &T{nil}},
+		{&T{[]int{}}, "[P]", &T{[]int{}}},
+		{&T{[]int{1, 2, 3}}, "[P]", &T{[]int{1, 2, 3}}},
+		{&T{nil}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
+		{&T{[]int{}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
+		{&T{[]int{1, 2, 3}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
+	} {
+		var s S
+		md, err := Decode(tt.input, &s)
+		if err != nil {
+			t.Errorf("[%d] Decode error: %s", i, err)
+			continue
+		}
+		if err := md.PrimitiveDecode(s.P, tt.v); err != nil {
+			t.Errorf("[%d] PrimitiveDecode error: %s", i, err)
+			continue
+		}
+		if !reflect.DeepEqual(tt.v, tt.want) {
+			t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want)
+		}
+	}
+}
+
+// ExampleMetaData_PrimitiveDecode demonstrates deferring decoding of parts
+// of a document with Primitive, then decoding them later via PrimitiveDecode.
+func ExampleMetaData_PrimitiveDecode() {
+	var md MetaData
+	var err error
+
+	var tomlBlob = `
+ranking = ["Springsteen", "J Geils"]
+
+[bands.Springsteen]
+started = 1973
+albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
+
+[bands."J Geils"]
+started = 1970
+albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
+`
+
+	type band struct {
+		Started int
+		Albums  []string
+	}
+	type classics struct {
+		Ranking []string
+		Bands   map[string]Primitive
+	}
+
+	// Do the initial decode. Reflection is delayed on Primitive values.
+	var music classics
+	if md, err = Decode(tomlBlob, &music); err != nil {
+		log.Fatal(err)
+	}
+
+	// MetaData still includes information on Primitive values.
+	fmt.Printf("Is `bands.Springsteen` defined? %v\n",
+		md.IsDefined("bands", "Springsteen"))
+
+	// Decode primitive data into Go values.
+	for _, artist := range music.Ranking {
+		// A band is a primitive value, so we need to decode it to get a
+		// real `band` value.
+		primValue := music.Bands[artist]
+
+		var aBand band
+		if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
+			log.Fatal(err)
+		}
+		fmt.Printf("%s started in %d.\n", artist, aBand.Started)
+	}
+	// Check to see if there were any fields left undecoded.
+	// Note that this won't be empty before decoding the Primitive value!
+	fmt.Printf("Undecoded: %q\n", md.Undecoded())
+
+	// Output:
+	// Is `bands.Springsteen` defined? true
+	// Springsteen started in 1973.
+	// J Geils started in 1970.
+	// Undecoded: []
+}
+
+// ExampleDecode demonstrates decoding nested tables into a map of structs,
+// including `toml` struct tags and datetime values.
+func ExampleDecode() {
+	var tomlBlob = `
+# Some comments.
+[alpha]
+ip = "10.0.0.1"
+
+	[alpha.config]
+	Ports = [ 8001, 8002 ]
+	Location = "Toronto"
+	Created = 1987-07-05T05:45:00Z
+
+[beta]
+ip = "10.0.0.2"
+
+	[beta.config]
+	Ports = [ 9001, 9002 ]
+	Location = "New Jersey"
+	Created = 1887-01-05T05:55:00Z
+`
+
+	type serverConfig struct {
+		Ports    []int
+		Location string
+		Created  time.Time
+	}
+
+	type server struct {
+		IP     string       `toml:"ip,omitempty"`
+		Config serverConfig `toml:"config"`
+	}
+
+	type servers map[string]server
+
+	var config servers
+	if _, err := Decode(tomlBlob, &config); err != nil {
+		log.Fatal(err)
+	}
+
+	for _, name := range []string{"alpha", "beta"} {
+		s := config[name]
+		fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
+			name, s.IP, s.Config.Location,
+			s.Config.Created.Format("2006-01-02"))
+		fmt.Printf("Ports: %v\n", s.Config.Ports)
+	}
+
+	// Output:
+	// Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
+	// Ports: [8001 8002]
+	// Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
+	// Ports: [9001 9002]
+}
+
+// duration wraps time.Duration so it can implement encoding.TextUnmarshaler
+// for the Unmarshaler example below.
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+ var err error
+ d.Duration, err = time.ParseDuration(string(text))
+ return err
+}
+
+// Example Unmarshaler shows how to decode TOML strings into your own
+// custom data type, here by implementing encoding.TextUnmarshaler on the
+// duration wrapper declared above.
+func Example_unmarshaler() {
+	blob := `
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+`
+	type song struct {
+		Name     string
+		Duration duration
+	}
+	type songs struct {
+		Song []song
+	}
+	var favorites songs
+	if _, err := Decode(blob, &favorites); err != nil {
+		log.Fatal(err)
+	}
+
+	// Code to implement the TextUnmarshaler interface for `duration`:
+	//
+	// type duration struct {
+	//	time.Duration
+	// }
+	//
+	// func (d *duration) UnmarshalText(text []byte) error {
+	//	var err error
+	//	d.Duration, err = time.ParseDuration(string(text))
+	//	return err
+	// }
+
+	for _, s := range favorites.Song {
+		fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+	}
+	// Output:
+	// Thunder Road (4m49s)
+	// Stairway to Heaven (8m3s)
+}
+
+// Example StrictDecoding shows how to detect whether there are keys in the
+// TOML document that weren't decoded into the value given. This is useful
+// for returning an error to the user if they've included extraneous fields
+// in their configuration.
+func Example_strictDecoding() {
+	var blob = `
+key1 = "value1"
+key2 = "value2"
+key3 = "value3"
+`
+	type config struct {
+		Key1 string
+		Key3 string
+	}
+
+	var conf config
+	md, err := Decode(blob, &conf)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// md.Undecoded lists keys present in the document but absent from conf.
+	fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
+	// Output:
+	// Undecoded keys: ["key2"]
+}
+
+// Example UnmarshalTOML shows how to implement a struct type that knows how to
+// unmarshal itself. The struct must take full responsibility for mapping the
+// values passed into the struct. The method may be used with interfaces in a
+// struct in cases where the actual type is not known until the data is
+// examined.
+func Example_unmarshalTOML() {
+
+	var blob = `
+[[parts]]
+type = "valve"
+id = "valve-1"
+size = 1.2
+rating = 4
+
+[[parts]]
+type = "valve"
+id = "valve-2"
+size = 2.1
+rating = 5
+
+[[parts]]
+type = "pipe"
+id = "pipe-1"
+length = 2.1
+diameter = 12
+
+[[parts]]
+type = "cable"
+id = "cable-1"
+length = 12
+rating = 3.1
+`
+	o := &order{}
+	err := Unmarshal([]byte(blob), o)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println(len(o.parts))
+
+	for _, part := range o.parts {
+		fmt.Println(part.Name())
+	}
+
+	// The commented code below mirrors the real declarations that follow
+	// this example in the file, reproduced here so the example is
+	// self-explanatory in godoc output.
+
+	// type order struct {
+	//	// NOTE `order.parts` is a private slice of type `part` which is an
+	//	// interface and may only be loaded from toml using the
+	//	// UnmarshalTOML() method of the Unmarshaler interface.
+	//	parts parts
+	// }
+
+	// func (o *order) UnmarshalTOML(data interface{}) error {
+
+	//	// NOTE the example below contains detailed type casting to show how
+	//	// the 'data' is retrieved. In operational use, a type cast wrapper
+	//	// may be preferred e.g.
+	//	//
+	//	// func AsMap(v interface{}) (map[string]interface{}, error) {
+	//	// 		return v.(map[string]interface{})
+	//	// }
+	//	//
+	//	// resulting in:
+	//	// d, _ := AsMap(data)
+	//	//
+
+	//	d, _ := data.(map[string]interface{})
+	//	parts, _ := d["parts"].([]map[string]interface{})
+
+	//	for _, p := range parts {
+
+	//		typ, _ := p["type"].(string)
+	//		id, _ := p["id"].(string)
+
+	//		// detect the type of part and handle each case
+	//		switch p["type"] {
+	//		case "valve":
+
+	//			size := float32(p["size"].(float64))
+	//			rating := int(p["rating"].(int64))
+
+	//			valve := &valve{
+	//				Type:   typ,
+	//				ID:     id,
+	//				Size:   size,
+	//				Rating: rating,
+	//			}
+
+	//			o.parts = append(o.parts, valve)
+
+	//		case "pipe":
+
+	//			length := float32(p["length"].(float64))
+	//			diameter := int(p["diameter"].(int64))
+
+	//			pipe := &pipe{
+	//				Type:     typ,
+	//				ID:       id,
+	//				Length:   length,
+	//				Diameter: diameter,
+	//			}
+
+	//			o.parts = append(o.parts, pipe)
+
+	//		case "cable":
+
+	//			length := int(p["length"].(int64))
+	//			rating := float32(p["rating"].(float64))
+
+	//			cable := &cable{
+	//				Type:   typ,
+	//				ID:     id,
+	//				Length: length,
+	//				Rating: rating,
+	//			}
+
+	//			o.parts = append(o.parts, cable)
+
+	//		}
+	//	}
+
+	//	return nil
+	// }
+
+	// type parts []part
+
+	// type part interface {
+	//	Name() string
+	// }
+
+	// type valve struct {
+	//	Type   string
+	//	ID     string
+	//	Size   float32
+	//	Rating int
+	// }
+
+	// func (v *valve) Name() string {
+	//	return fmt.Sprintf("VALVE: %s", v.ID)
+	// }
+
+	// type pipe struct {
+	//	Type     string
+	//	ID       string
+	//	Length   float32
+	//	Diameter int
+	// }
+
+	// func (p *pipe) Name() string {
+	//	return fmt.Sprintf("PIPE: %s", p.ID)
+	// }
+
+	// type cable struct {
+	//	Type   string
+	//	ID     string
+	//	Length int
+	//	Rating float32
+	// }
+
+	// func (c *cable) Name() string {
+	//	return fmt.Sprintf("CABLE: %s", c.ID)
+	// }
+
+	// Output:
+	// 4
+	// VALVE: valve-1
+	// VALVE: valve-2
+	// PIPE: pipe-1
+	// CABLE: cable-1
+
+}
+
+type order struct {
+	// NOTE `order.parts` is a private slice of type `part` which is an
+	// interface and may only be loaded from toml using the UnmarshalTOML()
+	// method of the Unmarshaler interface.
+	parts parts
+}
+
+// UnmarshalTOML maps the raw decoded document into o.parts, choosing a
+// concrete part implementation based on each entry's "type" field.
+func (o *order) UnmarshalTOML(data interface{}) error {
+
+	// NOTE the example below contains detailed type casting to show how
+	// the 'data' is retrieved. In operational use, a type cast wrapper
+	// may be preferred e.g.
+	//
+	// func AsMap(v interface{}) (map[string]interface{}, error) {
+	// 		return v.(map[string]interface{})
+	// }
+	//
+	// resulting in:
+	// d, _ := AsMap(data)
+	//
+
+	d, _ := data.(map[string]interface{})
+	parts, _ := d["parts"].([]map[string]interface{})
+
+	for _, p := range parts {
+
+		typ, _ := p["type"].(string)
+		id, _ := p["id"].(string)
+
+		// detect the type of part and handle each case
+		switch p["type"] {
+		case "valve":
+
+			size := float32(p["size"].(float64))
+			rating := int(p["rating"].(int64))
+
+			valve := &valve{
+				Type:   typ,
+				ID:     id,
+				Size:   size,
+				Rating: rating,
+			}
+
+			o.parts = append(o.parts, valve)
+
+		case "pipe":
+
+			length := float32(p["length"].(float64))
+			diameter := int(p["diameter"].(int64))
+
+			pipe := &pipe{
+				Type:     typ,
+				ID:       id,
+				Length:   length,
+				Diameter: diameter,
+			}
+
+			o.parts = append(o.parts, pipe)
+
+		case "cable":
+
+			length := int(p["length"].(int64))
+			rating := float32(p["rating"].(float64))
+
+			cable := &cable{
+				Type:   typ,
+				ID:     id,
+				Length: length,
+				Rating: rating,
+			}
+
+			o.parts = append(o.parts, cable)
+
+		}
+	}
+
+	return nil
+}
+
+// parts is the collection of components in an order.
+type parts []part
+
+// part is any order component that can report a display name.
+type part interface {
+	Name() string
+}
+
+// valve is a part with a size and an integer rating.
+type valve struct {
+	Type   string
+	ID     string
+	Size   float32
+	Rating int
+}
+
+// Name returns the display name of the valve.
+func (v *valve) Name() string {
+	return "VALVE: " + v.ID
+}
+
+// pipe is a part with a length and a diameter.
+type pipe struct {
+	Type     string
+	ID       string
+	Length   float32
+	Diameter int
+}
+
+// Name returns the display name of the pipe.
+func (p *pipe) Name() string {
+	return "PIPE: " + p.ID
+}
+
+// cable is a part with an integer length and a float rating.
+type cable struct {
+	Type   string
+	ID     string
+	Length int
+	Rating float32
+}
+
+// Name returns the display name of the cable.
+func (c *cable) Name() string {
+	return "CABLE: " + c.ID
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 0000000000..fe26800041
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,27 @@
+/*
+Package toml provides facilities for decoding and encoding TOML configuration
+files via reflection. There is also support for delaying decoding with
+the Primitive type, and querying the set of keys in a TOML document with the
+MetaData type.
+
+The specification implemented: https://github.com/mojombo/toml
+
+The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
+whether a file is a valid TOML document. It can also be used to print the
+type of each key in a TOML document.
+
+Testing
+
+There are two important types of tests used for this package. The first is
+contained inside '*_test.go' files and uses the standard Go unit testing
+framework. These tests are primarily devoted to holistically testing the
+decoder and encoder.
+
+The second type of testing is used to verify the implementation's adherence
+to the TOML specification. These tests have been factored into their own
+project: https://github.com/BurntSushi/toml-test
+
+The reason the tests are in a separate project is so that they can be used by
+any implementation of TOML. Namely, it is language agnostic.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 0000000000..4e4c97aed6
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,549 @@
+package toml
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// tomlEncodeError wraps an error for transport through panic/recover inside
+// the encoder (see safeEncode).
+type tomlEncodeError struct{ error }
+
+// Errors the encoder can panic with internally; safeEncode converts them
+// back into ordinary error returns for the caller.
+var (
+	errArrayMixedElementTypes = errors.New(
+		"can't encode array with mixed element types")
+	errArrayNilElement = errors.New(
+		"can't encode array with nil element")
+	errNonString = errors.New(
+		"can't encode a map with non-string key type")
+	errAnonNonStruct = errors.New(
+		"can't encode an anonymous field that is not a struct")
+	errArrayNoTable = errors.New(
+		"TOML array element can't contain a table")
+	errNoKey = errors.New(
+		"top-level values must be a Go map or struct")
+	errAnything = errors.New("") // used in testing
+)
+
+// quotedReplacer escapes the characters that must be backslash-escaped
+// inside a basic (double-quoted) TOML string.
+var quotedReplacer = strings.NewReplacer(
+	"\t", "\\t",
+	"\n", "\\n",
+	"\r", "\\r",
+	"\"", "\\\"",
+	"\\", "\\\\",
+)
+
+// Encoder controls the encoding of Go values to a TOML document to some
+// io.Writer.
+//
+// The indentation level can be controlled with the Indent field.
+type Encoder struct {
+	// A single indentation level. By default it is two spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	// w buffers writes to the caller's io.Writer; flushed at the end of
+	// each Encode call.
+	w *bufio.Writer
+}
+
+// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
+// given. By default, a single indentation level is 2 spaces.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the underlying
+// io.Writer. If the value given cannot be encoded to a valid TOML document,
+// then an error is returned.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. (If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.)
+//
+// When encoding TOML hashes (i.e., Go maps or structs), keys without any
+// sub-hashes are encoded first.
+//
+// If a Go map is encoded, then its keys are sorted alphabetically for
+// deterministic output. More control over this behavior may be provided if
+// there is demand for it.
+//
+// Encoding Go values without a corresponding TOML representation---like map
+// types with non-string keys---will cause an error to be returned. Similarly
+// for mixed arrays/slices, arrays/slices with nil elements, embedded
+// non-struct types and nested slices containing maps or structs.
+// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
+// and so is []map[string][]string.)
+func (enc *Encoder) Encode(v interface{}) error {
+	// Unwrap pointers/interfaces so we encode the underlying value.
+	rv := eindirect(reflect.ValueOf(v))
+	// encode panics internally with tomlEncodeError; safeEncode turns that
+	// panic back into an ordinary error.
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+// safeEncode calls enc.encode and converts a tomlEncodeError panic into an
+// ordinary error return. Any other panic value is re-raised untouched.
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			// Not one of ours: propagate the original panic.
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+// encode dispatches on the Go kind of rv and emits either a "key = value"
+// line, a [table], or an [[array of tables]] under key. It panics with
+// tomlEncodeError on unencodable values (recovered in safeEncode).
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we used that.
+	// Basically, this prevents the encoder for handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch rv.Interface().(type) {
+	case time.Time, TextMarshaler:
+		enc.keyEqElement(key, rv)
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.keyEqElement(key, rv)
+	case reflect.Array, reflect.Slice:
+		// A slice of tables becomes [[key]] blocks; anything else is an
+		// inline array on a single "key = [...]" line.
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.keyEqElement(key, rv)
+		}
+	case reflect.Interface:
+		// nil interfaces/maps/pointers produce no output at all.
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		panic(e("Unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element (primitives and
+// arrays).
+// eElement writes a single TOML value (primitive, datetime, TextMarshaler
+// output, or inline array) with no key and no trailing newline.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time:
+		// Special case time.Time as a primitive. Has to come before
+		// TextMarshaler below because time.Time implements
+		// encoding.TextMarshaler, but we need to always use UTC.
+		enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
+		return
+	case TextMarshaler:
+		// Special case. Use text marshaler if it's available for this value.
+		if s, err := v.MarshalText(); err != nil {
+			encPanic(err)
+		} else {
+			enc.writeQuoted(string(s))
+		}
+		return
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		enc.wf(strconv.FormatBool(rv.Bool()))
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64:
+		enc.wf(strconv.FormatInt(rv.Int(), 10))
+	case reflect.Uint, reflect.Uint8, reflect.Uint16,
+		reflect.Uint32, reflect.Uint64:
+		enc.wf(strconv.FormatUint(rv.Uint(), 10))
+	case reflect.Float32:
+		// TOML requires floats to carry a decimal point; see floatAddDecimal.
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
+	case reflect.Float64:
+		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
+	case reflect.Array, reflect.Slice:
+		enc.eArrayOrSliceElement(rv)
+	case reflect.Interface:
+		enc.eElement(rv.Elem())
+	case reflect.String:
+		enc.writeQuoted(rv.String())
+	default:
+		panic(e("Unexpected primitive type: %s", rv.Kind()))
+	}
+}
+
+// floatAddDecimal ensures a formatted float carries a decimal point, as the
+// TOML spec requires: "3" becomes "3.0", while "3.14" is returned as-is.
+func floatAddDecimal(fstr string) string {
+	if strings.Contains(fstr, ".") {
+		return fstr
+	}
+	return fstr + ".0"
+}
+
+// writeQuoted writes s as a basic TOML string, escaping special characters.
+func (enc *Encoder) writeQuoted(s string) {
+	enc.wf("\"%s\"", quotedReplacer.Replace(s))
+}
+
+// eArrayOrSliceElement writes rv as an inline TOML array: "[a, b, c]".
+func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
+	enc.wf("[")
+	for i, n := 0, rv.Len(); i < n; i++ {
+		if i > 0 {
+			enc.wf(", ")
+		}
+		enc.eElement(rv.Index(i))
+	}
+	enc.wf("]")
+}
+
+// eArrayOfTables writes an [[key]] header plus table body for each non-nil
+// element of the slice/array rv. A top-level (empty) key is an error.
+func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	for i := 0; i < rv.Len(); i++ {
+		trv := rv.Index(i)
+		// nil elements are skipped rather than emitted as empty tables.
+		if isNil(trv) {
+			continue
+		}
+		panicIfInvalidKey(key)
+		enc.newline()
+		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+		enc.eMapOrStruct(key, trv)
+	}
+}
+
+// eTable writes a [key] header (unless key is empty, i.e. the top level)
+// followed by the table's key/value pairs.
+func (enc *Encoder) eTable(key Key, rv reflect.Value) {
+	panicIfInvalidKey(key)
+	if len(key) == 1 {
+		// Output an extra new line between top-level tables.
+		// (The newline isn't written if nothing else has been written though.)
+		enc.newline()
+	}
+	if len(key) > 0 {
+		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
+		enc.newline()
+	}
+	enc.eMapOrStruct(key, rv)
+}
+
+// eMapOrStruct writes the body of a table from either a map or a struct,
+// dereferencing pointers/interfaces first via eindirect.
+func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
+	switch rv := eindirect(rv); rv.Kind() {
+	case reflect.Map:
+		enc.eMap(key, rv)
+	case reflect.Struct:
+		enc.eStruct(key, rv)
+	default:
+		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
+	}
+}
+
+// eMap writes a map as a TOML table body. Keys must be strings; scalar
+// entries come before sub-table entries, each group sorted alphabetically
+// so output is deterministic.
+func (enc *Encoder) eMap(key Key, rv reflect.Value) {
+	rt := rv.Type()
+	if rt.Key().Kind() != reflect.String {
+		encPanic(errNonString)
+	}
+
+	// Sort keys so that we have deterministic output. And write keys directly
+	// underneath this key first, before writing sub-structs or sub-maps.
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string) {
+		sort.Strings(mapKeys)
+		for _, mapKey := range mapKeys {
+			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(mrv) {
+				// Don't write anything for nil fields.
+				continue
+			}
+			enc.encode(key.add(mapKey), mrv)
+		}
+	}
+	writeMapKeys(mapKeysDirect)
+	writeMapKeys(mapKeysSub)
+}
+
+// eStruct writes a struct as a TOML table body, honoring `toml` tags
+// (renaming, "-", omitempty, omitzero) and flattening anonymous struct
+// fields. Scalar fields are written before sub-table fields.
+func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table, then all keys under it will be in that
+	// table (not the one we're writing here).
+	rt := rv.Type()
+	var fieldsDirect, fieldsSub [][]int
+	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			// skip unexported fields
+			if f.PkgPath != "" && !f.Anonymous {
+				continue
+			}
+			frv := rv.Field(i)
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					// Flatten embedded structs into the parent table.
+					addFields(t, frv, f.Index)
+					continue
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), f.Index)
+						}
+						continue
+					}
+					// Fall through to the normal field encoding logic below
+					// for non-struct anonymous fields.
+				}
+			}
+
+			if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	var writeFields = func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			sft := rt.FieldByIndex(fieldIndex)
+			sf := rv.FieldByIndex(fieldIndex)
+			if isNil(sf) {
+				// Don't write anything for nil fields.
+				continue
+			}
+
+			tag := sft.Tag.Get("toml")
+			if tag == "-" {
+				continue
+			}
+			keyName, opts := getOptions(tag)
+			if keyName == "" {
+				keyName = sft.Name
+			}
+			if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
+				continue
+			} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
+				continue
+			}
+
+			enc.encode(key.add(keyName), sf)
+		}
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
+// which means no concrete TOML type could be found (nil or invalid values).
+// It is also used to detect mixed-type array elements, which TOML forbids.
+//
+// (The previous stray comment here described a `tomlTypeName` function that
+// does not exist in this file; it has been removed.)
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		// A slice whose elements are tables is an "array of tables".
+		// (idiom: no else after return)
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case TextMarshaler:
+			// TextMarshaler output is encoded as a quoted string.
+			return tomlString
+		default:
+			return tomlHash
+		}
+	default:
+		panic("unexpected reflect.Kind: " + rv.Kind().String())
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	// Empty or nil arrays have no determinable element type.
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+
+	// Every subsequent element must match the first element's TOML type.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		elem := rv.Index(i)
+		switch elemType := tomlTypeOfGo(elem); {
+		case elemType == nil:
+			encPanic(errArrayNilElement)
+		case !typeEqual(firstType, elemType):
+			encPanic(errArrayMixedElementTypes)
+		}
+	}
+	// If we have a nested array, then we must make sure that the nested
+	// array contains ONLY primitives.
+	// This checks arbitrarily nested arrays.
+	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
+		nest := tomlArrayType(eindirect(rv.Index(0)))
+		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
+			encPanic(errArrayNoTable)
+		}
+	}
+	return firstType
+}
+
+// getOptions splits a `toml` struct tag of the form "name,opt1,opt2" into
+// the key name (possibly empty) and the set of options (e.g. "omitempty",
+// "omitzero").
+func getOptions(keyName string) (string, map[string]struct{}) {
+	opts := make(map[string]struct{})
+	ss := strings.Split(keyName, ",")
+	name := ss[0]
+	// Only the entries after the name are options. The previous loop
+	// iterated over all of ss, so the key name itself leaked into the
+	// option set — a field tagged e.g. `toml:"omitempty,omitzero"` would
+	// spuriously behave as if omitempty were set.
+	for _, opt := range ss[1:] {
+		opts[opt] = struct{}{}
+	}
+
+	return name, opts
+}
+
+// isZero reports whether a numeric value equals zero. Non-numeric kinds are
+// never considered zero (used by the `omitzero` tag option).
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	default:
+		return false
+	}
+}
+
+// isEmpty reports whether a value is "empty" for the `omitempty` tag option:
+// zero-length collections/strings, or false booleans. Other kinds are never
+// empty.
+func isEmpty(rv reflect.Value) bool {
+	switch k := rv.Kind(); k {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	default:
+		return false
+	}
+}
+
+// newline writes a "\n" unless nothing has been written yet, so the output
+// never begins with a blank line.
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// keyEqElement writes a single "key = value" line at the current indent
+// level. It aborts (via encPanic) on a missing or invalid key.
+func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	panicIfInvalidKey(key)
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	enc.newline()
+}
+
+// wf writes a formatted string to the buffered writer, panicking (via
+// encPanic) on write failure, and records that output has been produced.
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+// indentStr returns the indentation for a key: one Indent unit per level of
+// nesting (the top level is not indented).
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+// encPanic aborts encoding by panicking with a tomlEncodeError, which
+// safeEncode later converts back into an ordinary error return.
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+// eindirect dereferences pointers and interfaces until it reaches a
+// non-pointer, non-interface value. A nil pointer/interface yields the
+// zero (invalid) reflect.Value.
+func eindirect(v reflect.Value) reflect.Value {
+	for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+		v = v.Elem()
+	}
+	return v
+}
+
+// isNil reports whether rv is a nil interface, map, pointer, or slice.
+// Kinds that cannot be nil report false.
+func isNil(rv reflect.Value) bool {
+	k := rv.Kind()
+	if k == reflect.Interface || k == reflect.Map ||
+		k == reflect.Ptr || k == reflect.Slice {
+		return rv.IsNil()
+	}
+	return false
+}
+
+// panicIfInvalidKey aborts encoding (via encPanic) if any component of the
+// key is empty; empty key names cannot be represented in TOML.
+func panicIfInvalidKey(key Key) {
+	for _, k := range key {
+		if len(k) == 0 {
+			encPanic(e("Key '%s' is not a valid table name. Key names "+
+				"cannot be empty.", key.maybeQuotedAll()))
+		}
+	}
+}
+
+// isValidKeyName reports whether s can serve as a TOML key name (non-empty).
+// NOTE(review): not referenced in this file's visible code; presumably used
+// elsewhere in the package — confirm before removing.
+func isValidKeyName(s string) bool {
+	return len(s) != 0
+}
diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go
new file mode 100644
index 0000000000..ef7acdd74b
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode_test.go
@@ -0,0 +1,590 @@
+package toml
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net"
+ "testing"
+ "time"
+)
+
+// TestEncodeRoundTrip encodes a struct, decodes the result, re-encodes
+// it and asserts the two encodings are byte-identical — a cheap
+// fixed-point check instead of asserting each field.
+func TestEncodeRoundTrip(t *testing.T) {
+	type Config struct {
+		Age        int
+		Cats       []string
+		Pi         float64
+		Perfection []int
+		DOB        time.Time
+		Ipaddress  net.IP
+	}
+
+	var inputs = Config{
+		13,
+		[]string{"one", "two", "three"},
+		3.145,
+		[]int{11, 2, 3, 4},
+		time.Now(),
+		net.ParseIP("192.168.59.254"),
+	}
+
+	var firstBuffer bytes.Buffer
+	e := NewEncoder(&firstBuffer)
+	err := e.Encode(inputs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	var outputs Config
+	if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
+		log.Printf("Could not decode:\n-----\n%s\n-----\n",
+			firstBuffer.String())
+		t.Fatal(err)
+	}
+
+	// could test each value individually, but I'm lazy
+	var secondBuffer bytes.Buffer
+	e2 := NewEncoder(&secondBuffer)
+	err = e2.Encode(outputs)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if firstBuffer.String() != secondBuffer.String() {
+		t.Error(
+			firstBuffer.String(),
+			"\n\n is not identical to\n\n",
+			secondBuffer.String())
+	}
+}
+
+// XXX(burntsushi)
+// I think these tests probably should be removed. They are good, but they
+// ought to be obsolete by toml-test.
+// TestEncode drives the encoder through a table of cases covering
+// primitive fields, arrays/slices (including nested and mixed ones),
+// maps, nested/embedded structs, struct tags, and the expected error
+// cases (mixed-type arrays, nil array elements, non-string map keys,
+// empty key names). errAnything matches any non-nil error.
+func TestEncode(t *testing.T) {
+	type Embedded struct {
+		Int int `toml:"_int"`
+	}
+	type NonStruct int
+
+	// 20:30:40 at UTC+1 ("IST") encodes as 19:30:40 Zulu.
+	date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
+	dateStr := "2014-05-11T19:30:40Z"
+
+	tests := map[string]struct {
+		input      interface{}
+		wantOutput string
+		wantError  error
+	}{
+		"bool field": {
+			input: struct {
+				BoolTrue  bool
+				BoolFalse bool
+			}{true, false},
+			wantOutput: "BoolTrue = true\nBoolFalse = false\n",
+		},
+		"int fields": {
+			input: struct {
+				Int   int
+				Int8  int8
+				Int16 int16
+				Int32 int32
+				Int64 int64
+			}{1, 2, 3, 4, 5},
+			wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
+		},
+		"uint fields": {
+			input: struct {
+				Uint   uint
+				Uint8  uint8
+				Uint16 uint16
+				Uint32 uint32
+				Uint64 uint64
+			}{1, 2, 3, 4, 5},
+			wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
+				"\nUint64 = 5\n",
+		},
+		"float fields": {
+			input: struct {
+				Float32 float32
+				Float64 float64
+			}{1.5, 2.5},
+			wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
+		},
+		"string field": {
+			input:      struct{ String string }{"foo"},
+			wantOutput: "String = \"foo\"\n",
+		},
+		"string field and unexported field": {
+			input: struct {
+				String     string
+				unexported int
+			}{"foo", 0},
+			wantOutput: "String = \"foo\"\n",
+		},
+		"datetime field in UTC": {
+			input:      struct{ Date time.Time }{date},
+			wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
+		},
+		"datetime field as primitive": {
+			// Using a map here to fail if isStructOrMap() returns true for
+			// time.Time.
+			input: map[string]interface{}{
+				"Date": date,
+				"Int":  1,
+			},
+			wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
+		},
+		"array fields": {
+			input: struct {
+				IntArray0 [0]int
+				IntArray3 [3]int
+			}{[0]int{}, [3]int{1, 2, 3}},
+			wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
+		},
+		"slice fields": {
+			input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
+				nil, []int{}, []int{1, 2, 3},
+			},
+			wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
+		},
+		"datetime slices": {
+			input: struct{ DatetimeSlice []time.Time }{
+				[]time.Time{date, date},
+			},
+			wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
+				dateStr, dateStr),
+		},
+		"nested arrays and slices": {
+			input: struct {
+				SliceOfArrays         [][2]int
+				ArrayOfSlices         [2][]int
+				SliceOfArraysOfSlices [][2][]int
+				ArrayOfSlicesOfArrays [2][][2]int
+				SliceOfMixedArrays    [][2]interface{}
+				ArrayOfMixedSlices    [2][]interface{}
+			}{
+				[][2]int{{1, 2}, {3, 4}},
+				[2][]int{{1, 2}, {3, 4}},
+				[][2][]int{
+					{
+						{1, 2}, {3, 4},
+					},
+					{
+						{5, 6}, {7, 8},
+					},
+				},
+				[2][][2]int{
+					{
+						{1, 2}, {3, 4},
+					},
+					{
+						{5, 6}, {7, 8},
+					},
+				},
+				[][2]interface{}{
+					{1, 2}, {"a", "b"},
+				},
+				[2][]interface{}{
+					{1, 2}, {"a", "b"},
+				},
+			},
+			wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
+ArrayOfSlices = [[1, 2], [3, 4]]
+SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+SliceOfMixedArrays = [[1, 2], ["a", "b"]]
+ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
+`,
+		},
+		"empty slice": {
+			input:      struct{ Empty []interface{} }{[]interface{}{}},
+			wantOutput: "Empty = []\n",
+		},
+		"(error) slice with element type mismatch (string and integer)": {
+			input:     struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
+			wantError: errArrayMixedElementTypes,
+		},
+		"(error) slice with element type mismatch (integer and float)": {
+			input:     struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
+			wantError: errArrayMixedElementTypes,
+		},
+		"slice with elems of differing Go types, same TOML types": {
+			input: struct {
+				MixedInts   []interface{}
+				MixedFloats []interface{}
+			}{
+				[]interface{}{
+					int(1), int8(2), int16(3), int32(4), int64(5),
+					uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
+				},
+				[]interface{}{float32(1.5), float64(2.5)},
+			},
+			wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
+				"MixedFloats = [1.5, 2.5]\n",
+		},
+		"(error) slice w/ element type mismatch (one is nested array)": {
+			input: struct{ Mixed []interface{} }{
+				[]interface{}{1, []interface{}{2}},
+			},
+			wantError: errArrayMixedElementTypes,
+		},
+		"(error) slice with 1 nil element": {
+			input:     struct{ NilElement1 []interface{} }{[]interface{}{nil}},
+			wantError: errArrayNilElement,
+		},
+		"(error) slice with 1 nil element (and other non-nil elements)": {
+			input: struct{ NilElement []interface{} }{
+				[]interface{}{1, nil},
+			},
+			wantError: errArrayNilElement,
+		},
+		"simple map": {
+			input:      map[string]int{"a": 1, "b": 2},
+			wantOutput: "a = 1\nb = 2\n",
+		},
+		"map with interface{} value type": {
+			input:      map[string]interface{}{"a": 1, "b": "c"},
+			wantOutput: "a = 1\nb = \"c\"\n",
+		},
+		"map with interface{} value type, some of which are structs": {
+			input: map[string]interface{}{
+				"a": struct{ Int int }{2},
+				"b": 1,
+			},
+			wantOutput: "b = 1\n\n[a]\n  Int = 2\n",
+		},
+		"nested map": {
+			input: map[string]map[string]int{
+				"a": {"b": 1},
+				"c": {"d": 2},
+			},
+			wantOutput: "[a]\n  b = 1\n\n[c]\n  d = 2\n",
+		},
+		"nested struct": {
+			input: struct{ Struct struct{ Int int } }{
+				struct{ Int int }{1},
+			},
+			wantOutput: "[Struct]\n  Int = 1\n",
+		},
+		"nested struct and non-struct field": {
+			input: struct {
+				Struct struct{ Int int }
+				Bool   bool
+			}{struct{ Int int }{1}, true},
+			wantOutput: "Bool = true\n\n[Struct]\n  Int = 1\n",
+		},
+		"2 nested structs": {
+			input: struct{ Struct1, Struct2 struct{ Int int } }{
+				struct{ Int int }{1}, struct{ Int int }{2},
+			},
+			wantOutput: "[Struct1]\n  Int = 1\n\n[Struct2]\n  Int = 2\n",
+		},
+		"deeply nested structs": {
+			input: struct {
+				Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
+			}{
+				struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
+				struct{ Struct3 *struct{ Int int } }{nil},
+			},
+			wantOutput: "[Struct1]\n  [Struct1.Struct3]\n    Int = 1" +
+				"\n\n[Struct2]\n",
+		},
+		"nested struct with nil struct elem": {
+			input: struct {
+				Struct struct{ Inner *struct{ Int int } }
+			}{
+				struct{ Inner *struct{ Int int } }{nil},
+			},
+			wantOutput: "[Struct]\n",
+		},
+		"nested struct with no fields": {
+			input: struct {
+				Struct struct{ Inner struct{} }
+			}{
+				struct{ Inner struct{} }{struct{}{}},
+			},
+			wantOutput: "[Struct]\n  [Struct.Inner]\n",
+		},
+		"struct with tags": {
+			input: struct {
+				Struct struct {
+					Int int `toml:"_int"`
+				} `toml:"_struct"`
+				Bool bool `toml:"_bool"`
+			}{
+				struct {
+					Int int `toml:"_int"`
+				}{1}, true,
+			},
+			wantOutput: "_bool = true\n\n[_struct]\n  _int = 1\n",
+		},
+		"embedded struct": {
+			input:      struct{ Embedded }{Embedded{1}},
+			wantOutput: "_int = 1\n",
+		},
+		"embedded *struct": {
+			input:      struct{ *Embedded }{&Embedded{1}},
+			wantOutput: "_int = 1\n",
+		},
+		"nested embedded struct": {
+			input: struct {
+				Struct struct{ Embedded } `toml:"_struct"`
+			}{struct{ Embedded }{Embedded{1}}},
+			wantOutput: "[_struct]\n  _int = 1\n",
+		},
+		"nested embedded *struct": {
+			input: struct {
+				Struct struct{ *Embedded } `toml:"_struct"`
+			}{struct{ *Embedded }{&Embedded{1}}},
+			wantOutput: "[_struct]\n  _int = 1\n",
+		},
+		"embedded non-struct": {
+			input:      struct{ NonStruct }{5},
+			wantOutput: "NonStruct = 5\n",
+		},
+		"array of tables": {
+			input: struct {
+				Structs []*struct{ Int int } `toml:"struct"`
+			}{
+				[]*struct{ Int int }{{1}, {3}},
+			},
+			wantOutput: "[[struct]]\n  Int = 1\n\n[[struct]]\n  Int = 3\n",
+		},
+		"array of tables order": {
+			input: map[string]interface{}{
+				"map": map[string]interface{}{
+					"zero": 5,
+					"arr": []map[string]int{
+						{
+							"friend": 5,
+						},
+					},
+				},
+			},
+			wantOutput: "[map]\n  zero = 5\n\n  [[map.arr]]\n    friend = 5\n",
+		},
+		"(error) top-level slice": {
+			input:     []struct{ Int int }{{1}, {2}, {3}},
+			wantError: errNoKey,
+		},
+		"(error) slice of slice": {
+			input: struct {
+				Slices [][]struct{ Int int }
+			}{
+				[][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
+			},
+			wantError: errArrayNoTable,
+		},
+		"(error) map no string key": {
+			input:     map[int]string{1: ""},
+			wantError: errNonString,
+		},
+		"(error) empty key name": {
+			input:     map[string]int{"": 1},
+			wantError: errAnything,
+		},
+		"(error) empty map name": {
+			input: map[string]interface{}{
+				"": map[string]int{"v": 1},
+			},
+			wantError: errAnything,
+		},
+	}
+	for label, test := range tests {
+		encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
+	}
+}
+
+// TestEncodeNestedTableArrays checks that arrays of tables nested
+// inside arrays of tables ([[albums]] / [[albums.songs]]) are emitted
+// with the expected headers and two-space-per-level indentation.
+func TestEncodeNestedTableArrays(t *testing.T) {
+	type song struct {
+		Name string `toml:"name"`
+	}
+	type album struct {
+		Name  string `toml:"name"`
+		Songs []song `toml:"songs"`
+	}
+	type springsteen struct {
+		Albums []album `toml:"albums"`
+	}
+	value := springsteen{
+		[]album{
+			{"Born to Run",
+				[]song{{"Jungleland"}, {"Meeting Across the River"}}},
+			{"Born in the USA",
+				[]song{{"Glory Days"}, {"Dancing in the Dark"}}},
+		},
+	}
+	expected := `[[albums]]
+  name = "Born to Run"
+
+  [[albums.songs]]
+    name = "Jungleland"
+
+  [[albums.songs]]
+    name = "Meeting Across the River"
+
+[[albums]]
+  name = "Born in the USA"
+
+  [[albums.songs]]
+    name = "Glory Days"
+
+  [[albums.songs]]
+    name = "Dancing in the Dark"
+`
+	encodeExpected(t, "nested table arrays", value, expected, nil)
+}
+
+// TestEncodeArrayHashWithNormalHashOrder verifies ordering: plain
+// key/value pairs come first, then sub-tables, then arrays of tables.
+func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
+	type Alpha struct {
+		V int
+	}
+	type Beta struct {
+		V int
+	}
+	type Conf struct {
+		V int
+		A Alpha
+		B []Beta
+	}
+
+	val := Conf{
+		V: 1,
+		A: Alpha{2},
+		B: []Beta{{3}},
+	}
+	expected := "V = 1\n\n[A]\n  V = 2\n\n[[B]]\n  V = 3\n"
+	encodeExpected(t, "array hash with normal hash order", val, expected, nil)
+}
+
+// TestEncodeWithOmitEmpty checks the ",omitempty" tag option: empty
+// values (false, "", zero-length array/slice/map) are dropped entirely,
+// while non-empty values — including a whitespace-only string — are kept.
+func TestEncodeWithOmitEmpty(t *testing.T) {
+	type simple struct {
+		Bool   bool              `toml:"bool,omitempty"`
+		String string            `toml:"string,omitempty"`
+		Array  [0]byte           `toml:"array,omitempty"`
+		Slice  []int             `toml:"slice,omitempty"`
+		Map    map[string]string `toml:"map,omitempty"`
+	}
+
+	var v simple
+	encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil)
+	v = simple{
+		Bool:   true,
+		String: " ",
+		Slice:  []int{2, 3, 4},
+		Map:    map[string]string{"foo": "bar"},
+	}
+	expected := `bool = true
+string = " "
+slice = [2, 3, 4]
+
+[map]
+  foo = "bar"
+`
+	encodeExpected(t, "fields with omitempty are not omitted when non-empty",
+		v, expected, nil)
+}
+
+// TestEncodeWithOmitZero checks the ",omitzero" tag option for numeric
+// fields: zero-valued ints, uints and floats are omitted; non-zero
+// values are written (note 20.0 renders with a trailing ".0").
+func TestEncodeWithOmitZero(t *testing.T) {
+	type simple struct {
+		Number   int     `toml:"number,omitzero"`
+		Real     float64 `toml:"real,omitzero"`
+		Unsigned uint    `toml:"unsigned,omitzero"`
+	}
+
+	value := simple{0, 0.0, uint(0)}
+	expected := ""
+
+	encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
+
+	value.Number = 10
+	value.Real = 20
+	value.Unsigned = 5
+	expected = `number = 10
+real = 20.0
+unsigned = 5
+`
+	encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
+}
+
+// TestEncodeOmitemptyWithEmptyName checks that a tag of the form
+// `toml:",omitempty"` (options only, no explicit name) falls back to
+// the Go field name.
+func TestEncodeOmitemptyWithEmptyName(t *testing.T) {
+	type simple struct {
+		S []int `toml:",omitempty"`
+	}
+	v := simple{[]int{1, 2, 3}}
+	expected := "S = [1, 2, 3]\n"
+	encodeExpected(t, "simple with omitempty, no name, non-empty field",
+		v, expected, nil)
+}
+
+// TestEncodeAnonymousStructPointerField checks that an embedded struct
+// pointer produces no output whether it is nil or points to an empty
+// struct — neither case should panic or emit anything.
+func TestEncodeAnonymousStructPointerField(t *testing.T) {
+	type Sub struct{}
+	type simple struct {
+		*Sub
+	}
+
+	value := simple{}
+	expected := ""
+	encodeExpected(t, "nil anonymous struct pointer field", value, expected, nil)
+
+	value = simple{Sub: &Sub{}}
+	expected = ""
+	encodeExpected(t, "non-nil anonymous struct pointer field", value, expected, nil)
+}
+
+// TestEncodeIgnoredFields checks that a field tagged `toml:"-"` is
+// skipped by the encoder.
+func TestEncodeIgnoredFields(t *testing.T) {
+	type simple struct {
+		Number int `toml:"-"`
+	}
+	value := simple{}
+	expected := ""
+	encodeExpected(t, "ignored field", value, expected, nil)
+}
+
+// encodeExpected is the shared test driver: it encodes val and compares
+// both the error (exact identity, except errAnything which accepts any
+// non-nil error) and, when encoding succeeded, the exact output string.
+func encodeExpected(
+	t *testing.T, label string, val interface{}, wantStr string, wantErr error,
+) {
+	var buf bytes.Buffer
+	enc := NewEncoder(&buf)
+	err := enc.Encode(val)
+	if err != wantErr {
+		if wantErr != nil {
+			if wantErr == errAnything && err != nil {
+				return
+			}
+			t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
+		} else {
+			t.Errorf("%s: Encode failed: %s", label, err)
+		}
+	}
+	if err != nil {
+		return
+	}
+	if got := buf.String(); wantStr != got {
+		t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
+			label, wantStr, got)
+	}
+}
+
+// ExampleEncoder_Encode demonstrates encoding a map; the "// Output:"
+// block doubles as an assertion that keys are emitted in sorted order
+// with sub-tables last.
+func ExampleEncoder_Encode() {
+	date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
+	var config = map[string]interface{}{
+		"date":   date,
+		"counts": []int{1, 1, 2, 3, 5, 8},
+		"hash": map[string]string{
+			"key1": "val1",
+			"key2": "val2",
+		},
+	}
+	buf := new(bytes.Buffer)
+	if err := NewEncoder(buf).Encode(config); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(buf.String())
+
+	// Output:
+	// counts = [1, 1, 2, 3, 5, 8]
+	// date = 2010-03-14T18:00:00Z
+	//
+	// [hash]
+	//   key1 = "val1"
+	//   key2 = "val2"
+}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
new file mode 100644
index 0000000000..d36e1dd600
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types.go
@@ -0,0 +1,19 @@
+// +build go1.2
+
+package toml
+
+// In order to support Go 1.1, we define our own TextMarshaler and
+// TextUnmarshaler types. For Go 1.2+, we just alias them with the
+// standard library interfaces.
+
+import (
+ "encoding"
+)
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported. Under the go1.2 build tag (this file)
+// it simply aliases the standard-library interface.
+type TextMarshaler encoding.TextMarshaler
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported. Under the go1.2 build tag (this
+// file) it simply aliases the standard-library interface.
+type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
new file mode 100644
index 0000000000..e8d503d046
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
@@ -0,0 +1,18 @@
+// +build !go1.2
+
+package toml
+
+// These interfaces were introduced in Go 1.2, so we add them manually when
+// compiling for Go 1.1.
+
+// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
+// so that Go 1.1 can be supported. This file (build tag !go1.2) declares
+// the method set manually, mirroring the Go 1.2 stdlib interface.
+type TextMarshaler interface {
+	MarshalText() (text []byte, err error)
+}
+
+// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
+// here so that Go 1.1 can be supported. This file (build tag !go1.2)
+// declares the method set manually, mirroring the Go 1.2 stdlib interface.
+type TextUnmarshaler interface {
+	UnmarshalText(text []byte) error
+}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
new file mode 100644
index 0000000000..9b20b3a815
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -0,0 +1,871 @@
+package toml
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// itemType identifies the kind of token emitted by the lexer and
+// consumed by the parser.
+type itemType int
+
+const (
+	itemError itemType = iota // lexing failed; item value holds the message
+	itemNIL                   // used in the parser to indicate no type
+	itemEOF
+	itemText
+	itemString
+	itemRawString
+	itemMultilineString
+	itemRawMultilineString
+	itemBool
+	itemInteger
+	itemFloat
+	itemDatetime
+	itemArray // the start of an array
+	itemArrayEnd
+	itemTableStart
+	itemTableEnd
+	itemArrayTableStart
+	itemArrayTableEnd
+	itemKeyStart
+	itemCommentStart
+)
+
+// Significant characters of the TOML grammar. eof is the NUL rune,
+// returned by next() when input is exhausted; it never appears in
+// valid TOML text, so it is a safe sentinel.
+const (
+	eof             = 0
+	tableStart      = '['
+	tableEnd        = ']'
+	arrayTableStart = '['
+	arrayTableEnd   = ']'
+	tableSep        = '.'
+	keySep          = '='
+	arrayStart      = '['
+	arrayEnd        = ']'
+	arrayValTerm    = ','
+	commentStart    = '#'
+	stringStart     = '"'
+	stringEnd       = '"'
+	rawStringStart  = '\''
+	rawStringEnd    = '\''
+)
+
+// stateFn is one step of the lexer state machine: it consumes input and
+// returns the next state, or nil to stop (after emitting itemError/EOF).
+type stateFn func(lx *lexer) stateFn
+
+// lexer holds the scanning state: the input text, the [start, pos)
+// window of the pending token, the rune width of the last next() call
+// (for backup), the current line for error reporting, the next state
+// to run, and the channel on which items are delivered.
+type lexer struct {
+	input string
+	start int
+	pos   int
+	width int
+	line  int
+	state stateFn
+	items chan item
+
+	// A stack of state functions used to maintain context.
+	// The idea is to reuse parts of the state machine in various places.
+	// For example, values can appear at the top level or within arbitrarily
+	// nested arrays. The last state on the stack is used after a value has
+	// been lexed. Similarly for comments.
+	stack []stateFn
+}
+
+// item is a single lexed token: its type, raw text, and source line.
+type item struct {
+	typ  itemType
+	val  string
+	line int
+}
+
+// nextItem returns the next token, running the state machine forward
+// until one is buffered on lx.items. The lexer is driven lazily by the
+// consumer rather than in its own goroutine.
+// NOTE(review): after an error, errorf leaves lx.state nil; calling
+// nextItem again would then panic — presumably the parser stops at the
+// itemError it receives. Confirm at the call site.
+func (lx *lexer) nextItem() item {
+	for {
+		select {
+		case item := <-lx.items:
+			return item
+		default:
+			lx.state = lx.state(lx)
+		}
+	}
+}
+
+// lex creates a lexer for input, starting in the lexTop state. A
+// trailing newline is appended so every top-level item is terminated
+// uniformly. The items channel is buffered so states can emit a few
+// tokens without the consumer draining in between.
+func lex(input string) *lexer {
+	lx := &lexer{
+		input: input + "\n",
+		state: lexTop,
+		line:  1,
+		items: make(chan item, 10),
+		stack: make([]stateFn, 0, 10),
+	}
+	return lx
+}
+
+// push saves a continuation state to return to after a nested
+// construct (value, comment) has been lexed.
+func (lx *lexer) push(state stateFn) {
+	lx.stack = append(lx.stack, state)
+}
+
+// pop removes and returns the most recently pushed state. Popping an
+// empty stack is a lexer bug and is reported as an error item.
+func (lx *lexer) pop() stateFn {
+	if len(lx.stack) == 0 {
+		return lx.errorf("BUG in lexer: no states to pop.")
+	}
+	last := lx.stack[len(lx.stack)-1]
+	lx.stack = lx.stack[0 : len(lx.stack)-1]
+	return last
+}
+
+// current returns the text of the pending token, i.e. everything
+// consumed since the last emit/ignore.
+func (lx *lexer) current() string {
+	return lx.input[lx.start:lx.pos]
+}
+
+// emit sends the pending text as an item of the given type and starts
+// a new token at the current position.
+func (lx *lexer) emit(typ itemType) {
+	lx.items <- item{typ, lx.current(), lx.line}
+	lx.start = lx.pos
+}
+
+// emitTrim is emit with surrounding whitespace trimmed from the value.
+func (lx *lexer) emitTrim(typ itemType) {
+	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
+	lx.start = lx.pos
+}
+
+// next consumes and returns the next rune, returning eof (and setting
+// width to 0 so backup is a no-op) at end of input. Line accounting is
+// done here, before the rune is decoded, and undone in backup.
+func (lx *lexer) next() (r rune) {
+	if lx.pos >= len(lx.input) {
+		lx.width = 0
+		return eof
+	}
+
+	if lx.input[lx.pos] == '\n' {
+		lx.line++
+	}
+	r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
+	lx.pos += lx.width
+	return r
+}
+
+// ignore skips over the pending input before this point.
+func (lx *lexer) ignore() {
+	lx.start = lx.pos
+}
+
+// backup steps back one rune. Can be called only once per call of next.
+func (lx *lexer) backup() {
+	lx.pos -= lx.width
+	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
+		lx.line--
+	}
+}
+
+// accept consumes the next rune if it's equal to `valid`.
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// errorf stops all lexing by emitting an error and returning `nil`.
+// Note that any value that is a character is escaped if it's a special
+// character (new lines, tabs, etc.).
+// The line recorded is the current line, so %q-formatted runes in the
+// message show up escaped for readability.
+func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+	lx.items <- item{
+		itemError,
+		fmt.Sprintf(format, values...),
+		lx.line,
+	}
+	return nil
+}
+
+// lexTop consumes elements at the top level of TOML data: whitespace,
+// comments, table headers, key/value pairs, or EOF.
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		// EOF with pending unemitted input means a construct was cut short.
+		if lx.pos > lx.start {
+			return lx.errorf("Unexpected EOF.")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a new line. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a new line for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		// lexTop handles emitting itemEOF.
+		lx.ignore()
+		return lexTop
+	}
+	return lx.errorf("Expected a top-level item to end with a new line, "+
+		"comment or EOF, but got %q instead.", r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'. A second '[' selects the array-table variant; the
+// matching end state is pushed so lexTableNameEnd can return to it.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+// lexTableEnd emits the end of a plain table header; the ']' has
+// already been consumed by lexTableNameEnd.
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+// lexArrayTableEnd expects and consumes the second ']' of an array
+// table header ('[[name]]').
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf("Expected end of table array name delimiter %q, "+
+			"but got %q instead.", arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+// lexTableNameStart lexes the first character of one dotted component
+// of a table name: quoted components are delegated to the string lexer
+// via lexValue; bare components go to lexBareTableName. Empty
+// components are rejected here.
+func lexTableNameStart(lx *lexer) stateFn {
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("Unexpected end of table name. (Table names cannot " +
+			"be empty.)")
+	case r == tableSep:
+		return lx.errorf("Unexpected table separator. (Table names cannot " +
+			"be empty.)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexValue // reuse string lexing
+	default:
+		return lexBareTableName
+	}
+}
+
+// lexBareTableName lexes the name of a table. It assumes that at least one
+// valid character for the table has already been read.
+func lexBareTableName(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareTableName
+	case r == tableSep || r == tableEnd:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexTableNameEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace. A '.' continues with the next component; ']'
+// returns to the pushed lexTableEnd/lexArrayTableEnd state.
+func lexTableNameEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
+			"instead.", r)
+	}
+}
+
+// lexKeyStart consumes a key name up until the first non-whitespace character.
+// lexKeyStart will ignore whitespace. Quoted keys reuse the string
+// lexer via lexValue; anything else is treated as a bare key.
+func lexKeyStart(lx *lexer) stateFn {
+	r := lx.peek()
+	switch {
+	case r == keySep:
+		return lx.errorf("Unexpected key separator %q.", keySep)
+	case isWhitespace(r) || isNL(r):
+		lx.next()
+		return lexSkip(lx, lexKeyStart)
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		lx.push(lexKeyEnd)
+		return lexValue // reuse string lexing
+	default:
+		lx.ignore()
+		lx.emit(itemKeyStart)
+		return lexBareKey
+	}
+}
+
+// lexBareKey consumes the text of a bare key. Assumes that the first character
+// (which is not whitespace) has not yet been consumed.
+func lexBareKey(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isBareKeyChar(r):
+		return lexBareKey
+	case isWhitespace(r):
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	case r == keySep:
+		lx.backup()
+		lx.emitTrim(itemText)
+		return lexKeyEnd
+	default:
+		return lx.errorf("Bare keys cannot contain %q.", r)
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case r == keySep:
+		return lexSkip(lx, lexValue)
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	default:
+		return lx.errorf("Expected key separator %q, but got %q instead.",
+			keySep, r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the next is popped and returned.
+// A leading 't'/'f' is assumed to begin true/false; lexTrue/lexFalse
+// verify the rest. Triple quote detection uses two accept() probes.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT new lines.
+	// In array syntax, the array states are responsible for ignoring new
+	// lines.
+	r := lx.next()
+	if isWhitespace(r) {
+		return lexSkip(lx, lexValue)
+	}
+
+	switch {
+	case r == arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case r == stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == 't':
+		return lexTrue
+	case r == 'f':
+		return lexFalse
+	case r == '-':
+		return lexNumberStart
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	case r == '.': // special error case, be kind to users
+		return lx.errorf("Floats must start with a digit, not '.'.")
+	}
+	return lx.errorf("Expected value but found %q instead.", r)
+}
+
+// lexArrayValue consumes one value in an array. It assumes that '[' or ','
+// have already been consumed. All whitespace and new lines are ignored.
+// A ',' here (with no preceding value) is an error; ']' closes the array.
+func lexArrayValue(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValue)
+	case r == commentStart:
+		lx.push(lexArrayValue)
+		return lexCommentStart
+	case r == arrayValTerm:
+		return lx.errorf("Unexpected array value terminator %q.",
+			arrayValTerm)
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+
+	lx.backup()
+	lx.push(lexArrayValueEnd)
+	return lexValue
+}
+
+// lexArrayValueEnd consumes the cruft between values of an array. Namely,
+// it ignores whitespace and expects either a ',' or a ']'.
+func lexArrayValueEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r) || isNL(r):
+		return lexSkip(lx, lexArrayValueEnd)
+	case r == commentStart:
+		lx.push(lexArrayValueEnd)
+		return lexCommentStart
+	case r == arrayValTerm:
+		lx.ignore()
+		return lexArrayValue // move on to the next value
+	case r == arrayEnd:
+		return lexArrayEnd
+	}
+	return lx.errorf("Expected an array value terminator %q or an array "+
+		"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
+}
+
+// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
+// just been consumed.
+func lexArrayEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemArrayEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored. The closing '"'
+// is excluded from the emitted value (backup/emit/next/ignore dance).
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == stringEnd:
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored. Only a run
+// of three '"' terminates; one or two are part of the string content.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '\\':
+		return lexMultilineStringEscape
+	case r == stringEnd:
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				// Exclude the closing """ from the value, then re-consume
+				// and discard it.
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isNL(r):
+		return lx.errorf("Strings cannot contain new lines.")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'" has already been consumed and
+// ignored. Only a run of three "'" terminates, mirroring
+// lexMultilineString.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == rawStringEnd:
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				// Exclude the closing ''' from the value, then re-consume
+				// and discard it.
+				lx.backup()
+				lx.backup()
+				lx.backup()
+				lx.emit(itemRawMultilineString)
+				lx.next()
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+	return lexMultilineRawString
+}
+
+// lexMultilineStringEscape consumes an escaped character. It assumes that the
+// preceding '\\' has already been consumed. A backslash-newline (line
+// continuation) is legal only in multiline strings, so it is special-cased
+// here; all other escapes share lexStringEscape.
+func lexMultilineStringEscape(lx *lexer) stateFn {
+	// Handle the special case first:
+	if isNL(lx.next()) {
+		return lexMultilineString
+	} else {
+		lx.backup()
+		lx.push(lexMultilineString)
+		return lexStringEscape(lx)
+	}
+}
+
+// lexStringEscape validates a single escape sequence after '\\' and
+// returns to the pushed string state.
+// NOTE(review): the error message advertises \/ as allowed, but there
+// is no '/' case in the switch, so '\/' is actually rejected — the
+// message and the code disagree; confirm intended TOML spec behavior.
+func lexStringEscape(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case 'b':
+		fallthrough
+	case 't':
+		fallthrough
+	case 'n':
+		fallthrough
+	case 'f':
+		fallthrough
+	case 'r':
+		fallthrough
+	case '"':
+		fallthrough
+	case '\\':
+		return lx.pop()
+	case 'u':
+		return lexShortUnicodeEscape
+	case 'U':
+		return lexLongUnicodeEscape
+	}
+	return lx.errorf("Invalid escape character %q. Only the following "+
+		"escape characters are allowed: "+
+		"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
+		"\\uXXXX and \\UXXXXXXXX.", r)
+}
+
+// lexShortUnicodeEscape consumes the four hex digits of a \uXXXX
+// escape; the digits' validity as a code point is checked later, not here.
+func lexShortUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 4; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected four hexadecimal digits after '\\u', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexLongUnicodeEscape consumes the eight hex digits of a \UXXXXXXXX
+// escape; the digits' validity as a code point is checked later, not here.
+func lexLongUnicodeEscape(lx *lexer) stateFn {
+	var r rune
+	for i := 0; i < 8; i++ {
+		r = lx.next()
+		if !isHexadecimal(r) {
+			return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
+				"but got '%s' instead.", lx.current())
+		}
+	}
+	return lx.pop()
+}
+
+// lexNumberOrDateStart consumes either a (positive) integer, float or
+// datetime. It assumes that NO negative sign has been consumed.
+func lexNumberOrDateStart(lx *lexer) stateFn {
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("Floats must start with a digit, not '.'.")
+		} else {
+			return lx.errorf("Expected a digit but got %q.", r)
+		}
+	}
+	return lexNumberOrDate
+}
+
+// lexNumberOrDate consumes either a (positive) integer, float or datetime.
+// A '-' after exactly four digits ("YYYY-") disambiguates to a datetime;
+// a '.' switches to float lexing; any other non-digit ends an integer.
+func lexNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == '-':
+		// lx.pos-lx.start == 5 means four digits plus this '-'.
+		if lx.pos-lx.start != 5 {
+			return lx.errorf("All ISO8601 dates must be in full Zulu form.")
+		}
+		return lexDateAfterYear
+	case isDigit(r):
+		return lexNumberOrDate
+	case r == '.':
+		return lexFloatStart
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
+// It assumes that "YYYY-" has already been consumed. Only the exact
+// "MM-DDTHH:MM:SSZ" template is accepted — no offsets or fractions.
+func lexDateAfterYear(lx *lexer) stateFn {
+	formats := []rune{
+		// digits are '0'.
+		// everything else is direct equality.
+		'0', '0', '-', '0', '0',
+		'T',
+		'0', '0', ':', '0', '0', ':', '0', '0',
+		'Z',
+	}
+	for _, f := range formats {
+		r := lx.next()
+		if f == '0' {
+			if !isDigit(r) {
+				return lx.errorf("Expected digit in ISO8601 datetime, "+
+					"but found %q instead.", r)
+			}
+		} else if f != r {
+			return lx.errorf("Expected %q in ISO8601 datetime, "+
+				"but found %q instead.", f, r)
+		}
+	}
+	lx.emit(itemDatetime)
+	return lx.pop()
+}
+
+// lexNumberStart consumes either an integer or a float. It assumes that
+// a negative sign has already been read, but that *no* digits have been
+// consumed. lexNumberStart will move to the appropriate integer or float
+// states. (Negative values can never be datetimes, hence the separate
+// path from lexNumberOrDateStart.)
+func lexNumberStart(lx *lexer) stateFn {
+	// we MUST see a digit. Even floats have to start with a digit.
+	r := lx.next()
+	if !isDigit(r) {
+		if r == '.' {
+			return lx.errorf("Floats must start with a digit, not '.'.")
+		} else {
+			return lx.errorf("Expected a digit but got %q.", r)
+		}
+	}
+	return lexNumber
+}
+
+// lexNumber consumes an integer or a float after seeing the first digit.
+func lexNumber(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isDigit(r):
+		return lexNumber
+	case r == '.':
+		return lexFloatStart
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloatStart starts the consumption of digits of a float after a '.'.
+// Namely, at least one digit is required.
+func lexFloatStart(lx *lexer) stateFn {
+	r := lx.next()
+	if !isDigit(r) {
+		return lx.errorf("Floats must have a digit after the '.', but got "+
+			"%q instead.", r)
+	}
+	return lexFloat
+}
+
+// lexFloat consumes the digits of a float after a '.'.
+// Assumes that one digit has been consumed after a '.' already.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
+// consumed.
+func lexConst(lx *lexer, s string) stateFn {
+ for i := range s[1:] {
+ if r := lx.next(); r != rune(s[i+1]) {
+ return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
+ s[:i]+string(r))
+ }
+ }
+ return nil
+}
+
+// lexTrue consumes the "rue" in "true". It assumes that 't' has already
+// been consumed.
+func lexTrue(lx *lexer) stateFn {
+ if fn := lexConst(lx, "true"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
+// been consumed.
+func lexFalse(lx *lexer) stateFn {
+ if fn := lexConst(lx, "false"); fn != nil {
+ return fn
+ }
+ lx.emit(itemBool)
+ return lx.pop()
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+ lx.ignore()
+ lx.emit(itemCommentStart)
+ return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first new line character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+ r := lx.peek()
+ if isNL(r) || r == eof {
+ lx.emit(itemText)
+ return lx.pop()
+ }
+ lx.next()
+ return lexComment
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+ return func(lx *lexer) stateFn {
+ lx.ignore()
+ return nextState
+ }
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+ return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+func isDigit(r rune) bool {
+ return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+ return (r >= '0' && r <= '9') ||
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
+}
+
+func isBareKeyChar(r rune) bool {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' ||
+ r == '-'
+}
+
+func (itype itemType) String() string {
+ switch itype {
+ case itemError:
+ return "Error"
+ case itemNIL:
+ return "NIL"
+ case itemEOF:
+ return "EOF"
+ case itemText:
+ return "Text"
+ case itemString:
+ return "String"
+ case itemRawString:
+ return "String"
+ case itemMultilineString:
+ return "String"
+ case itemRawMultilineString:
+ return "String"
+ case itemBool:
+ return "Bool"
+ case itemInteger:
+ return "Integer"
+ case itemFloat:
+ return "Float"
+ case itemDatetime:
+ return "DateTime"
+ case itemTableStart:
+ return "TableStart"
+ case itemTableEnd:
+ return "TableEnd"
+ case itemKeyStart:
+ return "KeyStart"
+ case itemArray:
+ return "Array"
+ case itemArrayEnd:
+ return "ArrayEnd"
+ case itemCommentStart:
+ return "CommentStart"
+ }
+ panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+ return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 0000000000..6a82e84f64
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,493 @@
+package toml
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+)
+
+type parser struct {
+ mapping map[string]interface{}
+ types map[string]tomlType
+ lx *lexer
+
+ // A list of keys in the order that they appear in the TOML data.
+ ordered []Key
+
+ // the full key for the current hash in scope
+ context Key
+
+ // the base key name for everything except hashes
+ currentKey string
+
+ // rough approximation of line number
+ approxLine int
+
+ // A map of 'key.group.names' to whether they were created implicitly.
+ implicits map[string]bool
+}
+
+type parseError string
+
+func (pe parseError) Error() string {
+ return string(pe)
+}
+
+func parse(data string) (p *parser, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ if err, ok = r.(parseError); ok {
+ return
+ }
+ panic(r)
+ }
+ }()
+
+ p = &parser{
+ mapping: make(map[string]interface{}),
+ types: make(map[string]tomlType),
+ lx: lex(data),
+ ordered: make([]Key, 0),
+ implicits: make(map[string]bool),
+ }
+ for {
+ item := p.next()
+ if item.typ == itemEOF {
+ break
+ }
+ p.topLevel(item)
+ }
+
+ return p, nil
+}
+
+func (p *parser) panicf(format string, v ...interface{}) {
+ msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+ p.approxLine, p.current(), fmt.Sprintf(format, v...))
+ panic(parseError(msg))
+}
+
+func (p *parser) next() item {
+ it := p.lx.nextItem()
+ if it.typ == itemError {
+ p.panicf("%s", it.val)
+ }
+ return it
+}
+
+func (p *parser) bug(format string, v ...interface{}) {
+ log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
+}
+
+func (p *parser) expect(typ itemType) item {
+ it := p.next()
+ p.assertEqual(typ, it.typ)
+ return it
+}
+
+func (p *parser) assertEqual(expected, got itemType) {
+ if expected != got {
+ p.bug("Expected '%s' but got '%s'.", expected, got)
+ }
+}
+
+func (p *parser) topLevel(item item) {
+ switch item.typ {
+ case itemCommentStart:
+ p.approxLine = item.line
+ p.expect(itemText)
+ case itemTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemTableEnd, kg.typ)
+
+ p.establishContext(key, false)
+ p.setType("", tomlHash)
+ p.ordered = append(p.ordered, key)
+ case itemArrayTableStart:
+ kg := p.next()
+ p.approxLine = kg.line
+
+ var key Key
+ for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
+ key = append(key, p.keyString(kg))
+ }
+ p.assertEqual(itemArrayTableEnd, kg.typ)
+
+ p.establishContext(key, true)
+ p.setType("", tomlArrayHash)
+ p.ordered = append(p.ordered, key)
+ case itemKeyStart:
+ kname := p.next()
+ p.approxLine = kname.line
+ p.currentKey = p.keyString(kname)
+
+ val, typ := p.value(p.next())
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ)
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.currentKey = ""
+ default:
+ p.bug("Unexpected type at top level: %s", item.typ)
+ }
+}
+
+// Gets a string for a key (or part of a key in a table name).
+func (p *parser) keyString(it item) string {
+ switch it.typ {
+ case itemText:
+ return it.val
+ case itemString, itemMultilineString,
+ itemRawString, itemRawMultilineString:
+ s, _ := p.value(it)
+ return s.(string)
+ default:
+ p.bug("Unexpected key type: %s", it.typ)
+ panic("unreachable")
+ }
+}
+
+// value translates an expected value from the lexer into a Go value wrapped
+// as an empty interface.
+func (p *parser) value(it item) (interface{}, tomlType) {
+ switch it.typ {
+ case itemString:
+ return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
+ case itemMultilineString:
+ trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
+ return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
+ case itemRawString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemRawMultilineString:
+ return stripFirstNewline(it.val), p.typeOfPrimitive(it)
+ case itemBool:
+ switch it.val {
+ case "true":
+ return true, p.typeOfPrimitive(it)
+ case "false":
+ return false, p.typeOfPrimitive(it)
+ }
+ p.bug("Expected boolean value, but got '%s'.", it.val)
+ case itemInteger:
+ num, err := strconv.ParseInt(it.val, 10, 64)
+ if err != nil {
+ // See comment below for floats describing why we make a
+ // distinction between a bug and a user error.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Integer '%s' is out of the range of 64-bit "+
+ "signed integers.", it.val)
+ } else {
+ p.bug("Expected integer value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemFloat:
+ num, err := strconv.ParseFloat(it.val, 64)
+ if err != nil {
+ // Distinguish float values. Normally, it'd be a bug if the lexer
+ // provides an invalid float, but it's possible that the float is
+ // out of range of valid values (which the lexer cannot determine).
+ // So mark the former as a bug but the latter as a legitimate user
+ // error.
+ //
+ // This is also true for integers.
+ if e, ok := err.(*strconv.NumError); ok &&
+ e.Err == strconv.ErrRange {
+
+ p.panicf("Float '%s' is out of the range of 64-bit "+
+ "IEEE-754 floating-point numbers.", it.val)
+ } else {
+ p.bug("Expected float value, but got '%s'.", it.val)
+ }
+ }
+ return num, p.typeOfPrimitive(it)
+ case itemDatetime:
+ t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
+ if err != nil {
+ p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
+ }
+ return t, p.typeOfPrimitive(it)
+ case itemArray:
+ array := make([]interface{}, 0)
+ types := make([]tomlType, 0)
+
+ for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
+ if it.typ == itemCommentStart {
+ p.expect(itemText)
+ continue
+ }
+
+ val, typ := p.value(it)
+ array = append(array, val)
+ types = append(types, typ)
+ }
+ return array, p.typeOfArray(types)
+ }
+ p.bug("Unexpected value type: %s", it.typ)
+ panic("unreachable")
+}
+
+// establishContext sets the current context of the parser,
+// where the context is either a hash or an array of hashes. Which one is
+// set depends on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) establishContext(key Key, array bool) {
+ var ok bool
+
+ // Always start at the top level and drill down for our context.
+ hashContext := p.mapping
+ keyContext := make(Key, 0)
+
+	// We only need implicit hashes for the parent keys, key[0 : len(key)-1].
+ for _, k := range key[0 : len(key)-1] {
+ _, ok = hashContext[k]
+ keyContext = append(keyContext, k)
+
+ // No key? Make an implicit hash and move on.
+ if !ok {
+ p.addImplicit(keyContext)
+ hashContext[k] = make(map[string]interface{})
+ }
+
+ // If the hash context is actually an array of tables, then set
+ // the hash context to the last element in that array.
+ //
+ // Otherwise, it better be a table, since this MUST be a key group (by
+ // virtue of it not being the last element in a key).
+ switch t := hashContext[k].(type) {
+ case []map[string]interface{}:
+ hashContext = t[len(t)-1]
+ case map[string]interface{}:
+ hashContext = t
+ default:
+ p.panicf("Key '%s' was already created as a hash.", keyContext)
+ }
+ }
+
+ p.context = keyContext
+ if array {
+ // If this is the first element for this array, then allocate a new
+ // list of tables for it.
+ k := key[len(key)-1]
+ if _, ok := hashContext[k]; !ok {
+ hashContext[k] = make([]map[string]interface{}, 0, 5)
+ }
+
+ // Add a new table. But make sure the key hasn't already been used
+ // for something else.
+ if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+ hashContext[k] = append(hash, make(map[string]interface{}))
+ } else {
+ p.panicf("Key '%s' was already created and cannot be used as "+
+ "an array.", keyContext)
+ }
+ } else {
+ p.setValue(key[len(key)-1], make(map[string]interface{}))
+ }
+ p.context = append(p.context, key[len(key)-1])
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, account for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+ var tmpHash interface{}
+ var ok bool
+
+ hash := p.mapping
+ keyContext := make(Key, 0)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ if tmpHash, ok = hash[k]; !ok {
+ p.bug("Context for key '%s' has not been established.", keyContext)
+ }
+ switch t := tmpHash.(type) {
+ case []map[string]interface{}:
+ // The context is a table of hashes. Pick the most recent table
+ // defined as the current hash.
+ hash = t[len(t)-1]
+ case map[string]interface{}:
+ hash = t
+ default:
+ p.bug("Expected hash to have type 'map[string]interface{}', but "+
+ "it has '%T' instead.", tmpHash)
+ }
+ }
+ keyContext = append(keyContext, key)
+
+ if _, ok := hash[key]; ok {
+ // Typically, if the given key has already been set, then we have
+ // to raise an error since duplicate keys are disallowed. However,
+ // it's possible that a key was previously defined implicitly. In this
+ // case, it is allowed to be redefined concretely. (See the
+ // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
+ //
+ // But we have to make sure to stop marking it as an implicit. (So that
+ // another redefinition provokes an error.)
+ //
+ // Note that since it has already been defined (as a hash), we don't
+ // want to overwrite it. So our business is done.
+ if p.isImplicit(keyContext) {
+ p.removeImplicit(keyContext)
+ return
+ }
+
+ // Otherwise, we have a concrete key trying to override a previous
+ // key, which is *always* wrong.
+ p.panicf("Key '%s' has already been defined.", keyContext)
+ }
+ hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+ keyContext := make(Key, 0, len(p.context)+1)
+ for _, k := range p.context {
+ keyContext = append(keyContext, k)
+ }
+ if len(key) > 0 { // allow type setting for hashes
+ keyContext = append(keyContext, key)
+ }
+ p.types[keyContext.String()] = typ
+}
+
+// addImplicit sets the given Key as having been created implicitly.
+func (p *parser) addImplicit(key Key) {
+ p.implicits[key.String()] = true
+}
+
+// removeImplicit stops tagging the given key as having been implicitly
+// created.
+func (p *parser) removeImplicit(key Key) {
+ p.implicits[key.String()] = false
+}
+
+// isImplicit returns true if the key group pointed to by the key was created
+// implicitly.
+func (p *parser) isImplicit(key Key) bool {
+ return p.implicits[key.String()]
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+ if len(p.currentKey) == 0 {
+ return p.context.String()
+ }
+ if len(p.context) == 0 {
+ return p.currentKey
+ }
+ return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+ if len(s) == 0 || s[0] != '\n' {
+ return s
+ }
+ return s[1:]
+}
+
+func stripEscapedWhitespace(s string) string {
+ esc := strings.Split(s, "\\\n")
+ if len(esc) > 1 {
+ for i := 1; i < len(esc); i++ {
+ esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
+ }
+ }
+ return strings.Join(esc, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+ var replaced []rune
+ s := []byte(str)
+ r := 0
+ for r < len(s) {
+ if s[r] != '\\' {
+ c, size := utf8.DecodeRune(s[r:])
+ r += size
+ replaced = append(replaced, c)
+ continue
+ }
+ r += 1
+ if r >= len(s) {
+ p.bug("Escape sequence at end of string.")
+ return ""
+ }
+ switch s[r] {
+ default:
+ p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ return ""
+ case 'b':
+ replaced = append(replaced, rune(0x0008))
+ r += 1
+ case 't':
+ replaced = append(replaced, rune(0x0009))
+ r += 1
+ case 'n':
+ replaced = append(replaced, rune(0x000A))
+ r += 1
+ case 'f':
+ replaced = append(replaced, rune(0x000C))
+ r += 1
+ case 'r':
+ replaced = append(replaced, rune(0x000D))
+ r += 1
+ case '"':
+ replaced = append(replaced, rune(0x0022))
+ r += 1
+ case '\\':
+ replaced = append(replaced, rune(0x005C))
+ r += 1
+ case 'u':
+ // At this point, we know we have a Unicode escape of the form
+ // `uXXXX` at [r, r+5). (Because the lexer guarantees this
+ // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+ replaced = append(replaced, escaped)
+ r += 5
+ case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+ replaced = append(replaced, escaped)
+ r += 9
+ }
+ }
+ return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+ s := string(bs)
+ hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+ if err != nil {
+ p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+ "lexer claims it's OK: %s", s, err)
+ }
+ if !utf8.ValidRune(rune(hex)) {
+ p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+ }
+ return rune(hex)
+}
+
+func isStringType(ty itemType) bool {
+ return ty == itemString || ty == itemMultilineString ||
+ ty == itemRawString || ty == itemRawMultilineString
+}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
new file mode 100644
index 0000000000..562164be06
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/session.vim
@@ -0,0 +1 @@
+au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 0000000000..c73f8afc1a
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,91 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be moving
+// toward adding real composite types.
+type tomlType interface {
+ typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
+func typeEqual(t1, t2 tomlType) bool {
+ if t1 == nil || t2 == nil {
+ return false
+ }
+ return t1.typeString() == t2.typeString()
+}
+
+func typeIsHash(t tomlType) bool {
+ return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
+}
+
+type tomlBaseType string
+
+func (btype tomlBaseType) typeString() string {
+ return string(btype)
+}
+
+func (btype tomlBaseType) String() string {
+ return btype.typeString()
+}
+
+var (
+ tomlInteger tomlBaseType = "Integer"
+ tomlFloat tomlBaseType = "Float"
+ tomlDatetime tomlBaseType = "Datetime"
+ tomlString tomlBaseType = "String"
+ tomlBool tomlBaseType = "Bool"
+ tomlArray tomlBaseType = "Array"
+ tomlHash tomlBaseType = "Hash"
+ tomlArrayHash tomlBaseType = "ArrayHash"
+)
+
+// typeOfPrimitive returns a tomlType of any primitive value in TOML.
+// Primitive values are: Integer, Float, Datetime, String and Bool.
+//
+// Passing a lexer item other than the following will cause a BUG message
+// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
+func (p *parser) typeOfPrimitive(lexItem item) tomlType {
+ switch lexItem.typ {
+ case itemInteger:
+ return tomlInteger
+ case itemFloat:
+ return tomlFloat
+ case itemDatetime:
+ return tomlDatetime
+ case itemString:
+ return tomlString
+ case itemMultilineString:
+ return tomlString
+ case itemRawString:
+ return tomlString
+ case itemRawMultilineString:
+ return tomlString
+ case itemBool:
+ return tomlBool
+ }
+ p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
+ panic("unreachable")
+}
+
+// typeOfArray returns a tomlType for an array given a list of types of its
+// values.
+//
+// In the current spec, if an array is homogeneous, then its type is always
+// "Array". If the array is not homogeneous, an error is generated.
+func (p *parser) typeOfArray(types []tomlType) tomlType {
+ // Empty arrays are cool.
+ if len(types) == 0 {
+ return tomlArray
+ }
+
+ theType := types[0]
+ for _, t := range types[1:] {
+ if !typeEqual(theType, t) {
+ p.panicf("Array contains values of type '%s' and '%s', but "+
+ "arrays must be homogeneous.", theType, t)
+ }
+ }
+ return tomlArray
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
new file mode 100644
index 0000000000..6da608af46
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -0,0 +1,241 @@
+package toml
+
+// Struct field handling is adapted from code in encoding/json:
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the Go distribution.
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string // the name of the field (`toml` tag included)
+ tag bool // whether field has a `toml` tag
+ index []int // represents the depth of an anonymous field
+ typ reflect.Type // the type of the field
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from toml tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that TOML should recognize for the given
+// type. The algorithm is breadth-first search over the set of structs to
+// include - the top struct and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ name, _ := getOptions(sf.Tag.Get("toml"))
+ if name == "-" {
+ continue
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, field{name, tagged, index, ft})
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ f := field{name: ft.Name(), index: index, typ: ft}
+ next = append(next, f)
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with TOML tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// TOML tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/.gitignore b/vendor/github.com/PuerkitoBio/pigeon/.gitignore
new file mode 100644
index 0000000000..568f126036
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/.gitignore
@@ -0,0 +1,10 @@
+.*.swp
+.*.swo
+.swp
+*.test
+bootstrap/cmd/bootstrap-pigeon/bootstrap-pigeon
+bootstrap/cmd/bootstrap-build/bootstrap-build
+bootstrap/cmd/pegscan/pegscan
+bootstrap/cmd/pegparse/pegparse
+bin/
+pigeon
diff --git a/vendor/github.com/PuerkitoBio/pigeon/.travis.yml b/vendor/github.com/PuerkitoBio/pigeon/.travis.yml
new file mode 100644
index 0000000000..9720079803
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+
+script: go test -v ./...
+
+go:
+ - 1.1
+ - 1.4
+ - tip
diff --git a/vendor/github.com/PuerkitoBio/pigeon/CONTRIBUTING.md b/vendor/github.com/PuerkitoBio/pigeon/CONTRIBUTING.md
new file mode 100644
index 0000000000..d272ac517d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/CONTRIBUTING.md
@@ -0,0 +1,33 @@
+# Contributing to pigeon
+
+There are various ways to help support this open source project:
+
+* if you use pigeon and find it useful, talk about it - that's probably the most basic way to help any open-source project: getting the word out that it exists and that it can be useful
+* if you use pigeon and find bugs, please [file an issue][0]
+* if something is poorly documented, or doesn't work as documented, this is also a bug, please [file an issue][0]
+* if you can fix the issue (whether it is documentation- or code-related), then [submit a pull-request][1] - but read on to see what should be done to get it merged
+* if you would like to see some new feature/behaviour being implemented, please first [open an issue][0] to discuss it because features are less likely to get merged compared to bug fixes
+
+## Submitting a pull request
+
+Assuming you already have a copy of the repository (either via `go get`, a github fork, a clone, etc.), you will also need `make` to regenerate all tools and files generated when a dependency changes. I use GNU make version 4.1, other versions of make may work too but haven't been tested.
+
+Run `make` in the root directory of the repository. That will create the bootstrap builder, the bootstrap parser, and the final parser, along with some generated Go files. Once `make` is run successfully, run `go test ./...` in the root directory to make sure all tests pass.
+
+Once this is done and tests pass, you can start implementing the bug fix (or the new feature provided **it has already been discussed and agreed in a github issue** first).
+
+For a bug fix, the best way to proceed is to first write a test that proves the bug, then write the code that fixes the bug and makes the test pass. All other tests should still pass too (unless it relied on the buggy behaviour, in which case existing tests must be fixed).
+
+For a new feature, it must be thoroughly tested. New code without new test(s) is unlikely to get merged.
+
+Respect the coding style of the repository, which means essentially to respect the [coding guidelines of the Go community][2]. Use `gofmt` to format your code, and `goimports` to add and format the list of imported packages (or do it manually, but in a `goimports`-style).
+
+Once all code is done and tests pass, regenerate the whole tree with `make`, run `make lint` to make sure the code is correct, and run tests again. You are now ready to submit the pull request.
+
+## Licensing
+
+All pull requests that get merged will be made available under the BSD 3-Clause license (see the LICENSE file for details), as the rest of the pigeon repository. Do not submit pull requests if you do not want your contributions to be made available under those terms.
+
+[0]: https://github.com/PuerkitoBio/pigeon/issues/new
+[1]: https://github.com/PuerkitoBio/pigeon/pulls
+[2]: https://github.com/golang/go/wiki/CodeReviewComments
diff --git a/vendor/github.com/PuerkitoBio/pigeon/LICENSE b/vendor/github.com/PuerkitoBio/pigeon/LICENSE
new file mode 100644
index 0000000000..2c684aaf65
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2015, Martin Angers & Contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/pigeon/Makefile b/vendor/github.com/PuerkitoBio/pigeon/Makefile
new file mode 100644
index 0000000000..86a4e62f4f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/Makefile
@@ -0,0 +1,92 @@
+SHELL = /bin/sh
+
+# directories and source code lists
+ROOT = .
+ROOT_SRC = $(ROOT)/*.go
+BINDIR = ./bin
+EXAMPLES_DIR = $(ROOT)/examples
+TEST_DIR = $(ROOT)/test
+
+# builder and ast packages
+BUILDER_DIR = $(ROOT)/builder
+BUILDER_SRC = $(BUILDER_DIR)/*.go
+AST_DIR = $(ROOT)/ast
+AST_SRC = $(AST_DIR)/*.go
+
+# bootstrap tools variables
+BOOTSTRAP_DIR = $(ROOT)/bootstrap
+BOOTSTRAP_SRC = $(BOOTSTRAP_DIR)/*.go
+BOOTSTRAPBUILD_DIR = $(BOOTSTRAP_DIR)/cmd/bootstrap-build
+BOOTSTRAPBUILD_SRC = $(BOOTSTRAPBUILD_DIR)/*.go
+BOOTSTRAPPIGEON_DIR = $(BOOTSTRAP_DIR)/cmd/bootstrap-pigeon
+BOOTSTRAPPIGEON_SRC = $(BOOTSTRAPPIGEON_DIR)/*.go
+
+# grammar variables
+GRAMMAR_DIR = $(ROOT)/grammar
+BOOTSTRAP_GRAMMAR = $(GRAMMAR_DIR)/bootstrap.peg
+PIGEON_GRAMMAR = $(GRAMMAR_DIR)/pigeon.peg
+
+TEST_GENERATED_SRC = $(patsubst %.peg,%.go,$(shell echo ./{examples,test}/**/*.peg))
+
+all: $(BINDIR)/bootstrap-build $(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go \
+ $(BINDIR)/bootstrap-pigeon $(ROOT)/pigeon.go $(BINDIR)/pigeon \
+ $(TEST_GENERATED_SRC)
+
+$(BINDIR)/bootstrap-build: $(BOOTSTRAPBUILD_SRC) $(BOOTSTRAP_SRC) $(BUILDER_SRC) \
+ $(AST_SRC)
+ go build -o $@ $(BOOTSTRAPBUILD_DIR)
+
+$(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go: $(BINDIR)/bootstrap-build \
+ $(BOOTSTRAP_GRAMMAR)
+ $(BINDIR)/bootstrap-build $(BOOTSTRAP_GRAMMAR) | goimports > $@
+
+$(BINDIR)/bootstrap-pigeon: $(BOOTSTRAPPIGEON_SRC) \
+ $(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go
+ go build -o $@ $(BOOTSTRAPPIGEON_DIR)
+
+$(ROOT)/pigeon.go: $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR)
+ $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR) | goimports > $@
+
+$(BINDIR)/pigeon: $(ROOT_SRC) $(ROOT)/pigeon.go
+ go build -o $@ $(ROOT)
+
+$(BOOTSTRAP_GRAMMAR):
+$(PIGEON_GRAMMAR):
+
+# surely there's a better way to define the examples and test targets
+
+$(EXAMPLES_DIR)/json/json.go: $(EXAMPLES_DIR)/json/json.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+$(EXAMPLES_DIR)/calculator/calculator.go: $(EXAMPLES_DIR)/calculator/calculator.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+$(TEST_DIR)/andnot/andnot.go: $(TEST_DIR)/andnot/andnot.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+$(TEST_DIR)/predicates/predicates.go: $(TEST_DIR)/predicates/predicates.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+$(TEST_DIR)/issue_1/issue_1.go: $(TEST_DIR)/issue_1/issue_1.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+$(TEST_DIR)/linear/linear.go: $(TEST_DIR)/linear/linear.peg $(BINDIR)/pigeon
+ $(BINDIR)/pigeon $< | goimports > $@
+
+lint:
+ golint ./...
+ go vet ./...
+
+cmp:
+ @boot=$$(mktemp) && $(BINDIR)/bootstrap-pigeon $(PIGEON_GRAMMAR) | goimports > $$boot && \
+ official=$$(mktemp) && $(BINDIR)/pigeon $(PIGEON_GRAMMAR) | goimports > $$official && \
+ cmp $$boot $$official && \
+ unlink $$boot && \
+ unlink $$official
+
+clean:
+ rm $(BOOTSTRAPPIGEON_DIR)/bootstrap_pigeon.go $(ROOT)/pigeon.go $(TEST_GENERATED_SRC)
+ rm -rf $(BINDIR)
+
+.PHONY: all clean lint cmp
+
diff --git a/vendor/github.com/PuerkitoBio/pigeon/README.md b/vendor/github.com/PuerkitoBio/pigeon/README.md
new file mode 100644
index 0000000000..d1ea91bcd3
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/README.md
@@ -0,0 +1,144 @@
+# pigeon - a PEG parser generator for Go
+
+[![GoDoc](https://godoc.org/github.com/PuerkitoBio/pigeon?status.png)](https://godoc.org/github.com/PuerkitoBio/pigeon)
+[![build status](https://secure.travis-ci.org/PuerkitoBio/pigeon.png?branch=master)](http://travis-ci.org/PuerkitoBio/pigeon)
+[![Software License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE)
+
+The pigeon command generates parsers based on a [parsing expression grammar (PEG)][0]. Its grammar and syntax are inspired by the [PEG.js project][1], while the implementation is loosely based on the [parsing expression grammar for C# 3.0][2] article. It parses Unicode text encoded in UTF-8.
+
+See the [godoc page][3] for detailed usage.
+
+## Installation
+
+Provided you have Go correctly installed with the $GOPATH and $GOBIN environment variables set, run:
+
+```
+$ go get -u github.com/PuerkitoBio/pigeon
+```
+
+This will install or update the package, and the `pigeon` command will be installed in your $GOBIN directory. Neither this package nor the parsers generated by this command require any third-party dependency, unless such a dependency is used in the code blocks of the grammar.
+
+## Basic usage
+
+```
+$ pigeon [options] [PEG_GRAMMAR_FILE]
+```
+
+By default, the input grammar is read from `stdin` and the generated code is printed to `stdout`. You may save it in a file using the `-o` flag, but pigeon makes no attempt to format the generated code, nor does it try to generate the required imports, because such a tool already exists. The recommended way to generate a properly formatted and working parser is to pipe the output of pigeon through the `goimports` tool:
+
+```
+$ pigeon my_revolutionary_programming_language.peg | goimports > main.go
+```
+
+This way, the generated code has all the necessary imports and is properly formatted. You can install `goimports` using:
+
+```
+$ go get golang.org/x/tools/cmd/goimports
+```
+
+See the [godoc page][3] for detailed usage.
+
+## Example
+
+Given the following grammar:
+
+```
+{
+// part of the initializer code block omitted for brevity
+
+var ops = map[string]func(int, int) int {
+ "+": func(l, r int) int {
+ return l + r
+ },
+ "-": func(l, r int) int {
+ return l - r
+ },
+ "*": func(l, r int) int {
+ return l * r
+ },
+ "/": func(l, r int) int {
+ return l / r
+ },
+}
+
+func toIfaceSlice(v interface{}) []interface{} {
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{})
+}
+
+func eval(first, rest interface{}) int {
+ l := first.(int)
+ restSl := toIfaceSlice(rest)
+ for _, v := range restSl {
+ restExpr := toIfaceSlice(v)
+ r := restExpr[3].(int)
+ op := restExpr[1].(string)
+ l = ops[op](l, r)
+ }
+ return l
+}
+}
+
+
+Input <- expr:Expr EOF {
+ return expr, nil
+}
+
+Expr <- _ first:Term rest:( _ AddOp _ Term )* _ {
+ return eval(first, rest), nil
+}
+
+Term <- first:Factor rest:( _ MulOp _ Factor )* {
+ return eval(first, rest), nil
+}
+
+Factor <- '(' expr:Expr ')' {
+ return expr, nil
+} / integer:Integer {
+ return integer, nil
+}
+
+AddOp <- ( '+' / '-' ) {
+ return string(c.text), nil
+}
+
+MulOp <- ( '*' / '/' ) {
+ return string(c.text), nil
+}
+
+Integer <- '-'? [0-9]+ {
+ return strconv.Atoi(string(c.text))
+}
+
+_ "whitespace" <- [ \n\t\r]*
+
+EOF <- !.
+```
+
+The generated parser can parse simple arithmetic operations, e.g.:
+
+```
+18 + 3 - 27 * (-18 / -3)
+
+=> -141
+```
+
+More examples can be found in the `examples/` subdirectory.
+
+See the [godoc page][3] for detailed usage.
+
+## Contributing
+
+See the CONTRIBUTING.md file.
+
+## License
+
+The [BSD 3-Clause license][4]. See the LICENSE file.
+
+[0]: http://en.wikipedia.org/wiki/Parsing_expression_grammar
+[1]: http://pegjs.org/
+[2]: http://www.codeproject.com/Articles/29713/Parsing-Expression-Grammar-Support-for-C-Part
+[3]: https://godoc.org/github.com/PuerkitoBio/pigeon
+[4]: http://opensource.org/licenses/BSD-3-Clause
diff --git a/vendor/github.com/PuerkitoBio/pigeon/TODO b/vendor/github.com/PuerkitoBio/pigeon/TODO
new file mode 100644
index 0000000000..75a1f2145f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/TODO
@@ -0,0 +1,3 @@
+- refactor implementation as a VM to avoid stack overflow in pathological cases (and maybe better performance): in branch wip-vm
+? options like current receiver name read directly from the grammar file
+? type annotations for generated code functions
diff --git a/vendor/github.com/PuerkitoBio/pigeon/ast/ast.go b/vendor/github.com/PuerkitoBio/pigeon/ast/ast.go
new file mode 100644
index 0000000000..33845bb9aa
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/ast/ast.go
@@ -0,0 +1,587 @@
+// Package ast defines the abstract syntax tree for the PEG grammar.
+//
+// The parser generator's PEG grammar generates a tree using this package
+// that is then converted by the builder to the simplified AST used in
+// the generated parser.
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Pos represents a position in a source file.
+type Pos struct {
+ Filename string
+ Line int
+ Col int
+ Off int
+}
+
+// String returns the textual representation of a position.
+func (p Pos) String() string {
+ if p.Filename != "" {
+ return fmt.Sprintf("%s:%d:%d (%d)", p.Filename, p.Line, p.Col, p.Off)
+ }
+ return fmt.Sprintf("%d:%d (%d)", p.Line, p.Col, p.Off)
+}
+
+// Grammar is the top-level node of the AST for the PEG grammar.
+type Grammar struct {
+ p Pos
+ Init *CodeBlock
+ Rules []*Rule
+}
+
+// NewGrammar creates a new grammar at the specified position.
+func NewGrammar(p Pos) *Grammar {
+ return &Grammar{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (g *Grammar) Pos() Pos { return g.p }
+
+// String returns the textual representation of a node.
+func (g *Grammar) String() string {
+ var buf bytes.Buffer
+
+ buf.WriteString(fmt.Sprintf("%s: %T{Init: %v, Rules: [\n",
+ g.p, g, g.Init))
+ for _, r := range g.Rules {
+ buf.WriteString(fmt.Sprintf("%s,\n", r))
+ }
+ buf.WriteString("]}")
+ return buf.String()
+}
+
+// Rule represents a rule in the PEG grammar. It has a name, an optional
+// display name to be used in error messages, and an expression.
+type Rule struct {
+ p Pos
+ Name *Identifier
+ DisplayName *StringLit
+ Expr Expression
+}
+
+// NewRule creates a rule with at the specified position and with the
+// specified name as identifier.
+func NewRule(p Pos, name *Identifier) *Rule {
+ return &Rule{p: p, Name: name}
+}
+
+// Pos returns the starting position of the node.
+func (r *Rule) Pos() Pos { return r.p }
+
+// String returns the textual representation of a node.
+func (r *Rule) String() string {
+ return fmt.Sprintf("%s: %T{Name: %v, DisplayName: %v, Expr: %v}",
+ r.p, r, r.Name, r.DisplayName, r.Expr)
+}
+
+// Expression is the interface implemented by all expression types.
+type Expression interface {
+ Pos() Pos
+}
+
+// ChoiceExpr is an ordered sequence of expressions. The parser tries to
+// match any of the alternatives in sequence and stops at the first one
+// that matches.
+type ChoiceExpr struct {
+ p Pos
+ Alternatives []Expression
+}
+
+// NewChoiceExpr creates a choice expression at the specified position.
+func NewChoiceExpr(p Pos) *ChoiceExpr {
+ return &ChoiceExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (c *ChoiceExpr) Pos() Pos { return c.p }
+
+// String returns the textual representation of a node.
+func (c *ChoiceExpr) String() string {
+ var buf bytes.Buffer
+
+ buf.WriteString(fmt.Sprintf("%s: %T{Alternatives: [\n", c.p, c))
+ for _, e := range c.Alternatives {
+ buf.WriteString(fmt.Sprintf("%s,\n", e))
+ }
+ buf.WriteString("]}")
+ return buf.String()
+}
+
+// ActionExpr is an expression that has an associated block of code to
+// execute when the expression matches.
+type ActionExpr struct {
+ p Pos
+ Expr Expression
+ Code *CodeBlock
+ FuncIx int
+}
+
+// NewActionExpr creates a new action expression at the specified position.
+func NewActionExpr(p Pos) *ActionExpr {
+ return &ActionExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (a *ActionExpr) Pos() Pos { return a.p }
+
+// String returns the textual representation of a node.
+func (a *ActionExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v, Code: %v}", a.p, a, a.Expr, a.Code)
+}
+
+// SeqExpr is an ordered sequence of expressions, all of which must match
+// if the SeqExpr is to be a match itself.
+type SeqExpr struct {
+ p Pos
+ Exprs []Expression
+}
+
+// NewSeqExpr creates a new sequence expression at the specified position.
+func NewSeqExpr(p Pos) *SeqExpr {
+ return &SeqExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (s *SeqExpr) Pos() Pos { return s.p }
+
+// String returns the textual representation of a node.
+func (s *SeqExpr) String() string {
+ var buf bytes.Buffer
+
+ buf.WriteString(fmt.Sprintf("%s: %T{Exprs: [\n", s.p, s))
+ for _, e := range s.Exprs {
+ buf.WriteString(fmt.Sprintf("%s,\n", e))
+ }
+ buf.WriteString("]}")
+ return buf.String()
+}
+
+// LabeledExpr is an expression that has an associated label. Code blocks
+// can access the value of the expression using that label, that becomes
+// a local variable in the code.
+type LabeledExpr struct {
+ p Pos
+ Label *Identifier
+ Expr Expression
+}
+
+// NewLabeledExpr creates a new labeled expression at the specified position.
+func NewLabeledExpr(p Pos) *LabeledExpr {
+ return &LabeledExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (l *LabeledExpr) Pos() Pos { return l.p }
+
+// String returns the textual representation of a node.
+func (l *LabeledExpr) String() string {
+ return fmt.Sprintf("%s: %T{Label: %v, Expr: %v}", l.p, l, l.Label, l.Expr)
+}
+
+// AndExpr is a zero-length matcher that is considered a match if the
+// expression it contains is a match.
+type AndExpr struct {
+ p Pos
+ Expr Expression
+}
+
+// NewAndExpr creates a new and (&) expression at the specified position.
+func NewAndExpr(p Pos) *AndExpr {
+ return &AndExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (a *AndExpr) Pos() Pos { return a.p }
+
+// String returns the textual representation of a node.
+func (a *AndExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v}", a.p, a, a.Expr)
+}
+
+// NotExpr is a zero-length matcher that is considered a match if the
+// expression it contains is not a match.
+type NotExpr struct {
+ p Pos
+ Expr Expression
+}
+
+// NewNotExpr creates a new not (!) expression at the specified position.
+func NewNotExpr(p Pos) *NotExpr {
+ return &NotExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (n *NotExpr) Pos() Pos { return n.p }
+
+// String returns the textual representation of a node.
+func (n *NotExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v}", n.p, n, n.Expr)
+}
+
+// ZeroOrOneExpr is an expression that can be matched zero or one time.
+type ZeroOrOneExpr struct {
+ p Pos
+ Expr Expression
+}
+
+// NewZeroOrOneExpr creates a new zero or one expression at the specified
+// position.
+func NewZeroOrOneExpr(p Pos) *ZeroOrOneExpr {
+ return &ZeroOrOneExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (z *ZeroOrOneExpr) Pos() Pos { return z.p }
+
+// String returns the textual representation of a node.
+func (z *ZeroOrOneExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v}", z.p, z, z.Expr)
+}
+
+// ZeroOrMoreExpr is an expression that can be matched zero or more times.
+type ZeroOrMoreExpr struct {
+ p Pos
+ Expr Expression
+}
+
+// NewZeroOrMoreExpr creates a new zero or more expression at the specified
+// position.
+func NewZeroOrMoreExpr(p Pos) *ZeroOrMoreExpr {
+ return &ZeroOrMoreExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (z *ZeroOrMoreExpr) Pos() Pos { return z.p }
+
+// String returns the textual representation of a node.
+func (z *ZeroOrMoreExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v}", z.p, z, z.Expr)
+}
+
+// OneOrMoreExpr is an expression that can be matched one or more times.
+type OneOrMoreExpr struct {
+ p Pos
+ Expr Expression
+}
+
+// NewOneOrMoreExpr creates a new one or more expression at the specified
+// position.
+func NewOneOrMoreExpr(p Pos) *OneOrMoreExpr {
+ return &OneOrMoreExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (o *OneOrMoreExpr) Pos() Pos { return o.p }
+
+// String returns the textual representation of a node.
+func (o *OneOrMoreExpr) String() string {
+ return fmt.Sprintf("%s: %T{Expr: %v}", o.p, o, o.Expr)
+}
+
+// RuleRefExpr is an expression that references a rule by name.
+type RuleRefExpr struct {
+ p Pos
+ Name *Identifier
+}
+
+// NewRuleRefExpr creates a new rule reference expression at the specified
+// position.
+func NewRuleRefExpr(p Pos) *RuleRefExpr {
+ return &RuleRefExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (r *RuleRefExpr) Pos() Pos { return r.p }
+
+// String returns the textual representation of a node.
+func (r *RuleRefExpr) String() string {
+ return fmt.Sprintf("%s: %T{Name: %v}", r.p, r, r.Name)
+}
+
+// AndCodeExpr is a zero-length matcher that is considered a match if the
+// code block returns true.
+type AndCodeExpr struct {
+ p Pos
+ Code *CodeBlock
+ FuncIx int
+}
+
+// NewAndCodeExpr creates a new and (&) code expression at the specified
+// position.
+func NewAndCodeExpr(p Pos) *AndCodeExpr {
+ return &AndCodeExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (a *AndCodeExpr) Pos() Pos { return a.p }
+
+// String returns the textual representation of a node.
+func (a *AndCodeExpr) String() string {
+ return fmt.Sprintf("%s: %T{Code: %v}", a.p, a, a.Code)
+}
+
+// NotCodeExpr is a zero-length matcher that is considered a match if the
+// code block returns false.
+type NotCodeExpr struct {
+ p Pos
+ Code *CodeBlock
+ FuncIx int
+}
+
+// NewNotCodeExpr creates a new not (!) code expression at the specified
+// position.
+func NewNotCodeExpr(p Pos) *NotCodeExpr {
+ return &NotCodeExpr{p: p}
+}
+
+// Pos returns the starting position of the node.
+func (n *NotCodeExpr) Pos() Pos { return n.p }
+
+// String returns the textual representation of a node.
+func (n *NotCodeExpr) String() string {
+ return fmt.Sprintf("%s: %T{Code: %v}", n.p, n, n.Code)
+}
+
+// LitMatcher is a string literal matcher. The value to match may be a
+// double-quoted string, a single-quoted single character, or a back-tick
+// quoted raw string.
+type LitMatcher struct {
+ posValue // can be str, rstr or char
+ IgnoreCase bool
+}
+
+// NewLitMatcher creates a new literal matcher at the specified position and
+// with the specified value.
+func NewLitMatcher(p Pos, v string) *LitMatcher {
+ return &LitMatcher{posValue: posValue{p: p, Val: v}}
+}
+
+// Pos returns the starting position of the node.
+func (l *LitMatcher) Pos() Pos { return l.p }
+
+// String returns the textual representation of a node.
+func (l *LitMatcher) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q, IgnoreCase: %t}", l.p, l, l.Val, l.IgnoreCase)
+}
+
+// CharClassMatcher is a character class matcher. The value to match must
+// be one of the specified characters, in a range of characters, or in the
+// Unicode classes of characters.
+type CharClassMatcher struct {
+ posValue
+ IgnoreCase bool
+ Inverted bool
+ Chars []rune
+ Ranges []rune // pairs of low/high range
+ UnicodeClasses []string
+}
+
+// NewCharClassMatcher creates a new character class matcher at the specified
+// position and with the specified raw value. It parses the raw value into
+// the list of characters, ranges and Unicode classes.
+func NewCharClassMatcher(p Pos, raw string) *CharClassMatcher {
+ c := &CharClassMatcher{posValue: posValue{p: p, Val: raw}}
+ c.parse()
+ return c
+}
+
+func (c *CharClassMatcher) parse() {
+ raw := c.Val
+ c.IgnoreCase = strings.HasSuffix(raw, "i")
+ if c.IgnoreCase {
+ raw = raw[:len(raw)-1]
+ }
+
+ // "unquote" the character classes
+ raw = raw[1 : len(raw)-1]
+ if len(raw) == 0 {
+ return
+ }
+
+ c.Inverted = raw[0] == '^'
+ if c.Inverted {
+ raw = raw[1:]
+ if len(raw) == 0 {
+ return
+ }
+ }
+
+ // content of char class is necessarily valid, so escapes are correct
+ r := strings.NewReader(raw)
+ var chars []rune
+ var buf bytes.Buffer
+outer:
+ for {
+ rn, _, err := r.ReadRune()
+ if err != nil {
+ break outer
+ }
+
+ consumeN := 0
+ switch rn {
+ case '\\':
+ rn, _, _ := r.ReadRune()
+ switch rn {
+ case ']':
+ chars = append(chars, rn)
+ continue
+
+ case 'p':
+ rn, _, _ := r.ReadRune()
+ if rn == '{' {
+ buf.Reset()
+ for {
+ rn, _, _ := r.ReadRune()
+ if rn == '}' {
+ break
+ }
+ buf.WriteRune(rn)
+ }
+ c.UnicodeClasses = append(c.UnicodeClasses, buf.String())
+ } else {
+ c.UnicodeClasses = append(c.UnicodeClasses, string(rn))
+ }
+ continue
+
+ case 'x':
+ consumeN = 2
+ case 'u':
+ consumeN = 4
+ case 'U':
+ consumeN = 8
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ consumeN = 2
+ }
+
+ buf.Reset()
+ buf.WriteRune(rn)
+ for i := 0; i < consumeN; i++ {
+ rn, _, _ := r.ReadRune()
+ buf.WriteRune(rn)
+ }
+ rn, _, _, _ = strconv.UnquoteChar("\\"+buf.String(), 0)
+ chars = append(chars, rn)
+
+ default:
+ chars = append(chars, rn)
+ }
+ }
+
+ // extract ranges and chars
+ inRange, wasRange := false, false
+ for i, r := range chars {
+ if inRange {
+ c.Ranges = append(c.Ranges, r)
+ inRange = false
+ wasRange = true
+ continue
+ }
+
+ if r == '-' && !wasRange && len(c.Chars) > 0 && i < len(chars)-1 {
+ inRange = true
+ wasRange = false
+ // start of range is the last Char added
+ c.Ranges = append(c.Ranges, c.Chars[len(c.Chars)-1])
+ c.Chars = c.Chars[:len(c.Chars)-1]
+ continue
+ }
+ wasRange = false
+ c.Chars = append(c.Chars, r)
+ }
+}
+
+// Pos returns the starting position of the node.
+func (c *CharClassMatcher) Pos() Pos { return c.p }
+
+// String returns the textual representation of a node.
+func (c *CharClassMatcher) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q, IgnoreCase: %t, Inverted: %t}",
+ c.p, c, c.Val, c.IgnoreCase, c.Inverted)
+}
+
+// AnyMatcher is a matcher that matches any character except end-of-file.
+type AnyMatcher struct {
+ posValue
+}
+
+// NewAnyMatcher creates a new any matcher at the specified position. The
+// value is provided for completeness' sake, but it is always the dot.
+func NewAnyMatcher(p Pos, v string) *AnyMatcher {
+ return &AnyMatcher{posValue{p, v}}
+}
+
+// Pos returns the starting position of the node.
+func (a *AnyMatcher) Pos() Pos { return a.p }
+
+// String returns the textual representation of a node.
+func (a *AnyMatcher) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q}", a.p, a, a.Val)
+}
+
+// CodeBlock represents a code block.
+type CodeBlock struct {
+ posValue
+}
+
+// NewCodeBlock creates a new code block at the specified position and with
+// the specified value. The value includes the outer braces.
+func NewCodeBlock(p Pos, code string) *CodeBlock {
+ return &CodeBlock{posValue{p, code}}
+}
+
+// Pos returns the starting position of the node.
+func (c *CodeBlock) Pos() Pos { return c.p }
+
+// String returns the textual representation of a node.
+func (c *CodeBlock) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q}", c.p, c, c.Val)
+}
+
+// Identifier represents an identifier.
+type Identifier struct {
+ posValue
+}
+
+// NewIdentifier creates a new identifier at the specified position and
+// with the specified name.
+func NewIdentifier(p Pos, name string) *Identifier {
+ return &Identifier{posValue{p: p, Val: name}}
+}
+
+// Pos returns the starting position of the node.
+func (i *Identifier) Pos() Pos { return i.p }
+
+// String returns the textual representation of a node.
+func (i *Identifier) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q}", i.p, i, i.Val)
+}
+
+// StringLit represents a string literal.
+type StringLit struct {
+ posValue
+}
+
+// NewStringLit creates a new string literal at the specified position and
+// with the specified value.
+func NewStringLit(p Pos, val string) *StringLit {
+ return &StringLit{posValue{p: p, Val: val}}
+}
+
+// Pos returns the starting position of the node.
+func (s *StringLit) Pos() Pos { return s.p }
+
+// String returns the textual representation of a node.
+func (s *StringLit) String() string {
+ return fmt.Sprintf("%s: %T{Val: %q}", s.p, s, s.Val)
+}
+
+type posValue struct {
+ p Pos
+ Val string
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/ast/ast_test.go b/vendor/github.com/PuerkitoBio/pigeon/ast/ast_test.go
new file mode 100644
index 0000000000..767e9a1da8
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/ast/ast_test.go
@@ -0,0 +1,107 @@
+package ast
+
+import (
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+var charClasses = []string{
+ "[]",
+ "[]i",
+ "[^]",
+ "[^]i",
+ "[a]",
+ "[ab]i",
+ "[^abc]i",
+ `[\a]`,
+ `[\b\nt]`,
+ `[\b\nt\pL]`,
+ `[\p{Greek}\tz\\\pN]`,
+ `[-]`,
+ `[--]`,
+ `[---]`,
+ `[a-z]`,
+ `[a-zB0-9]`,
+ `[A-Z]i`,
+ `[a-]`,
+ `[----]`,
+ `[\x00-\x05]`,
+}
+
+var expChars = []string{
+ "",
+ "",
+ "",
+ "",
+ "a",
+ "ab",
+ "abc",
+ "\a",
+ "\b\nt",
+ "\b\nt",
+ "\tz\\",
+ "-",
+ "--",
+ "",
+ "",
+ "B",
+ "",
+ "a-",
+ "-",
+ "",
+}
+
+var expUnicodeClasses = [][]string{
+ 9: {"L"},
+ 10: {"Greek", "N"},
+ 19: nil,
+}
+
+var expRanges = []string{
+ 13: "--",
+ 14: "az",
+ 15: "az09",
+ 16: "AZ",
+ 18: "--",
+ 19: "\x00\x05",
+}
+
+func TestCharClassParse(t *testing.T) {
+ for i, c := range charClasses {
+ m := NewCharClassMatcher(Pos{}, c)
+
+ ic := strings.HasSuffix(c, "i")
+ if m.IgnoreCase != ic {
+ t.Errorf("%q: want ignore case: %t, got %t", c, ic, m.IgnoreCase)
+ }
+ iv := c[1] == '^'
+ if m.Inverted != iv {
+ t.Errorf("%q: want inverted: %t, got %t", c, iv, m.Inverted)
+ }
+
+ if n := utf8.RuneCountInString(expChars[i]); len(m.Chars) != n {
+ t.Errorf("%q: want %d chars, got %d", c, n, len(m.Chars))
+ } else if string(m.Chars) != expChars[i] {
+ t.Errorf("%q: want %q, got %q", c, expChars[i], string(m.Chars))
+ }
+
+ if n := utf8.RuneCountInString(expRanges[i]); len(m.Ranges) != n {
+ t.Errorf("%q: want %d chars, got %d", c, n, len(m.Ranges))
+ } else if string(m.Ranges) != expRanges[i] {
+ t.Errorf("%q: want %q, got %q", c, expRanges[i], string(m.Ranges))
+ }
+
+ if n := len(expUnicodeClasses[i]); len(m.UnicodeClasses) != n {
+ t.Errorf("%q: want %d Unicode classes, got %d", c, n, len(m.UnicodeClasses))
+ } else if n > 0 {
+ want := expUnicodeClasses[i]
+ got := m.UnicodeClasses
+ for j, wantClass := range want {
+ if wantClass != got[j] {
+ t.Errorf("%q: range table %d: want %v, got %v", c, j, wantClass, got[j])
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/benchparse_test.go b/vendor/github.com/PuerkitoBio/pigeon/benchparse_test.go
new file mode 100644
index 0000000000..4b307ec90f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/benchparse_test.go
@@ -0,0 +1,36 @@
+package main
+
+import "testing"
+
+// With Unicode classes in the grammar:
+// BenchmarkParseUnicodeClass 2000 548233 ns/op 96615 B/op 978 allocs/op
+//
+// With Unicode classes in a go map:
+// BenchmarkParseUnicodeClass 5000 272224 ns/op 37990 B/op 482 allocs/op
+func BenchmarkParseUnicodeClass(b *testing.B) {
+ input := []byte("a = [\\p{Latin}]")
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", input); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// With keywords in the grammar:
+// BenchmarkParseKeyword 5000 315189 ns/op 50175 B/op 530 allocs/op
+//
+// With keywords in a go map:
+// BenchmarkParseKeyword 10000 201175 ns/op 27017 B/op 331 allocs/op
+func BenchmarkParseKeyword(b *testing.B) {
+ input := []byte("a = uint32:'a'")
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", input); err == nil {
+ // error IS expected, fatal if none
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-build/main.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-build/main.go
new file mode 100644
index 0000000000..0a931eb3c9
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-build/main.go
@@ -0,0 +1,52 @@
+// Command bootstrap-build bootstraps the PEG parser generator by
+// parsing the bootstrap grammar and creating a basic parser generator
+// sufficiently complete to parse the pigeon PEG grammar.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/PuerkitoBio/pigeon/bootstrap"
+ "github.com/PuerkitoBio/pigeon/builder"
+)
+
+func main() {
+ outFlag := flag.String("o", "", "output file, defaults to stdout")
+ flag.Parse()
+
+ if flag.NArg() != 1 {
+ fmt.Fprintln(os.Stderr, "USAGE: bootstrap-build [-o OUTPUT] FILE")
+ os.Exit(1)
+ }
+
+ outw := os.Stdout
+ if *outFlag != "" {
+ outf, err := os.Create(*outFlag)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ defer outf.Close()
+ outw = outf
+ }
+
+ f, err := os.Open(os.Args[1])
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(2)
+ }
+ defer f.Close()
+
+ p := bootstrap.NewParser()
+ g, err := p.Parse(os.Args[1], f)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ if err := builder.BuildParser(outw, g); err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bench_test.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bench_test.go
new file mode 100644
index 0000000000..c5b2c19925
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bench_test.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+ "io/ioutil"
+ "testing"
+)
+
+func BenchmarkParsePigeonNoMemo(b *testing.B) {
+ d, err := ioutil.ReadFile("../../../grammar/pigeon.peg")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(false)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkParsePigeonMemo(b *testing.B) {
+ d, err := ioutil.ReadFile("../../../grammar/pigeon.peg")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(true)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bootstrap_pigeon.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bootstrap_pigeon.go
new file mode 100644
index 0000000000..b43152a421
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/bootstrap_pigeon.go
@@ -0,0 +1,3031 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "Grammar",
+ pos: position{line: 5, col: 1, offset: 18},
+ expr: &actionExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ run: (*parser).callonGrammar1,
+ expr: &seqExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 5, col: 14, offset: 33},
+ label: "initializer",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 5, col: 28, offset: 47},
+ expr: &seqExpr{
+ pos: position{line: 5, col: 28, offset: 47},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 28, offset: 47},
+ name: "Initializer",
+ },
+ &ruleRefExpr{
+ pos: position{line: 5, col: 40, offset: 59},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 5, col: 46, offset: 65},
+ label: "rules",
+ expr: &oneOrMoreExpr{
+ pos: position{line: 5, col: 54, offset: 73},
+ expr: &seqExpr{
+ pos: position{line: 5, col: 54, offset: 73},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 54, offset: 73},
+ name: "Rule",
+ },
+ &ruleRefExpr{
+ pos: position{line: 5, col: 59, offset: 78},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Initializer",
+ pos: position{line: 24, col: 1, offset: 521},
+ expr: &actionExpr{
+ pos: position{line: 24, col: 15, offset: 537},
+ run: (*parser).callonInitializer1,
+ expr: &seqExpr{
+ pos: position{line: 24, col: 15, offset: 537},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 24, col: 15, offset: 537},
+ label: "code",
+ expr: &ruleRefExpr{
+ pos: position{line: 24, col: 20, offset: 542},
+ name: "CodeBlock",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 24, col: 30, offset: 552},
+ name: "EOS",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Rule",
+ pos: position{line: 28, col: 1, offset: 582},
+ expr: &actionExpr{
+ pos: position{line: 28, col: 8, offset: 591},
+ run: (*parser).callonRule1,
+ expr: &seqExpr{
+ pos: position{line: 28, col: 8, offset: 591},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 28, col: 8, offset: 591},
+ label: "name",
+ expr: &ruleRefExpr{
+ pos: position{line: 28, col: 13, offset: 596},
+ name: "IdentifierName",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 28, offset: 611},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 28, col: 31, offset: 614},
+ label: "display",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 28, col: 41, offset: 624},
+ expr: &seqExpr{
+ pos: position{line: 28, col: 41, offset: 624},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 28, col: 41, offset: 624},
+ name: "StringLiteral",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 55, offset: 638},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 61, offset: 644},
+ name: "RuleDefOp",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 71, offset: 654},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 28, col: 74, offset: 657},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 28, col: 79, offset: 662},
+ name: "Expression",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 90, offset: 673},
+ name: "EOS",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Expression",
+ pos: position{line: 41, col: 1, offset: 957},
+ expr: &ruleRefExpr{
+ pos: position{line: 41, col: 14, offset: 972},
+ name: "ChoiceExpr",
+ },
+ },
+ {
+ name: "ChoiceExpr",
+ pos: position{line: 43, col: 1, offset: 984},
+ expr: &actionExpr{
+ pos: position{line: 43, col: 14, offset: 999},
+ run: (*parser).callonChoiceExpr1,
+ expr: &seqExpr{
+ pos: position{line: 43, col: 14, offset: 999},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 43, col: 14, offset: 999},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 43, col: 20, offset: 1005},
+ name: "ActionExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 43, col: 31, offset: 1016},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 43, col: 38, offset: 1023},
+ expr: &seqExpr{
+ pos: position{line: 43, col: 38, offset: 1023},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 43, col: 38, offset: 1023},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 43, col: 41, offset: 1026},
+ val: "/",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 43, col: 45, offset: 1030},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 43, col: 48, offset: 1033},
+ name: "ActionExpr",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ActionExpr",
+ pos: position{line: 58, col: 1, offset: 1438},
+ expr: &actionExpr{
+ pos: position{line: 58, col: 14, offset: 1453},
+ run: (*parser).callonActionExpr1,
+ expr: &seqExpr{
+ pos: position{line: 58, col: 14, offset: 1453},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 58, col: 14, offset: 1453},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 58, col: 19, offset: 1458},
+ name: "SeqExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 58, col: 27, offset: 1466},
+ label: "code",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 58, col: 34, offset: 1473},
+ expr: &seqExpr{
+ pos: position{line: 58, col: 34, offset: 1473},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 58, col: 34, offset: 1473},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 58, col: 37, offset: 1476},
+ name: "CodeBlock",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SeqExpr",
+ pos: position{line: 72, col: 1, offset: 1742},
+ expr: &actionExpr{
+ pos: position{line: 72, col: 11, offset: 1754},
+ run: (*parser).callonSeqExpr1,
+ expr: &seqExpr{
+ pos: position{line: 72, col: 11, offset: 1754},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 72, col: 11, offset: 1754},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 72, col: 17, offset: 1760},
+ name: "LabeledExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 72, col: 29, offset: 1772},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 72, col: 36, offset: 1779},
+ expr: &seqExpr{
+ pos: position{line: 72, col: 36, offset: 1779},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 72, col: 36, offset: 1779},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 72, col: 39, offset: 1782},
+ name: "LabeledExpr",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "LabeledExpr",
+ pos: position{line: 85, col: 1, offset: 2133},
+ expr: &choiceExpr{
+ pos: position{line: 85, col: 15, offset: 2149},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 85, col: 15, offset: 2149},
+ run: (*parser).callonLabeledExpr2,
+ expr: &seqExpr{
+ pos: position{line: 85, col: 15, offset: 2149},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 85, col: 15, offset: 2149},
+ label: "label",
+ expr: &ruleRefExpr{
+ pos: position{line: 85, col: 21, offset: 2155},
+ name: "Identifier",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 85, col: 32, offset: 2166},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 85, col: 35, offset: 2169},
+ val: ":",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 85, col: 39, offset: 2173},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 85, col: 42, offset: 2176},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 85, col: 47, offset: 2181},
+ name: "PrefixedExpr",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 91, col: 5, offset: 2354},
+ name: "PrefixedExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "PrefixedExpr",
+ pos: position{line: 93, col: 1, offset: 2368},
+ expr: &choiceExpr{
+ pos: position{line: 93, col: 16, offset: 2385},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 93, col: 16, offset: 2385},
+ run: (*parser).callonPrefixedExpr2,
+ expr: &seqExpr{
+ pos: position{line: 93, col: 16, offset: 2385},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 93, col: 16, offset: 2385},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 93, col: 19, offset: 2388},
+ name: "PrefixedOp",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 93, col: 30, offset: 2399},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 93, col: 33, offset: 2402},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 93, col: 38, offset: 2407},
+ name: "SuffixedExpr",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 5, offset: 2689},
+ name: "SuffixedExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "PrefixedOp",
+ pos: position{line: 106, col: 1, offset: 2703},
+ expr: &actionExpr{
+ pos: position{line: 106, col: 14, offset: 2718},
+ run: (*parser).callonPrefixedOp1,
+ expr: &choiceExpr{
+ pos: position{line: 106, col: 16, offset: 2720},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 106, col: 16, offset: 2720},
+ val: "&",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 106, col: 22, offset: 2726},
+ val: "!",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SuffixedExpr",
+ pos: position{line: 110, col: 1, offset: 2768},
+ expr: &choiceExpr{
+ pos: position{line: 110, col: 16, offset: 2785},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 110, col: 16, offset: 2785},
+ run: (*parser).callonSuffixedExpr2,
+ expr: &seqExpr{
+ pos: position{line: 110, col: 16, offset: 2785},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 110, col: 16, offset: 2785},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 110, col: 21, offset: 2790},
+ name: "PrimaryExpr",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 110, col: 33, offset: 2802},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 110, col: 36, offset: 2805},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 110, col: 39, offset: 2808},
+ name: "SuffixedOp",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 129, col: 5, offset: 3338},
+ name: "PrimaryExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "SuffixedOp",
+ pos: position{line: 131, col: 1, offset: 3352},
+ expr: &actionExpr{
+ pos: position{line: 131, col: 14, offset: 3367},
+ run: (*parser).callonSuffixedOp1,
+ expr: &choiceExpr{
+ pos: position{line: 131, col: 16, offset: 3369},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 131, col: 16, offset: 3369},
+ val: "?",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 131, col: 22, offset: 3375},
+ val: "*",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 131, col: 28, offset: 3381},
+ val: "+",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "PrimaryExpr",
+ pos: position{line: 135, col: 1, offset: 3423},
+ expr: &choiceExpr{
+ pos: position{line: 135, col: 15, offset: 3439},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 135, col: 15, offset: 3439},
+ name: "LitMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 28, offset: 3452},
+ name: "CharClassMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 47, offset: 3471},
+ name: "AnyMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 60, offset: 3484},
+ name: "RuleRefExpr",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 74, offset: 3498},
+ name: "SemanticPredExpr",
+ },
+ &actionExpr{
+ pos: position{line: 135, col: 93, offset: 3517},
+ run: (*parser).callonPrimaryExpr7,
+ expr: &seqExpr{
+ pos: position{line: 135, col: 93, offset: 3517},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 135, col: 93, offset: 3517},
+ val: "(",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 97, offset: 3521},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 135, col: 100, offset: 3524},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 135, col: 105, offset: 3529},
+ name: "Expression",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 116, offset: 3540},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 135, col: 119, offset: 3543},
+ val: ")",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RuleRefExpr",
+ pos: position{line: 138, col: 1, offset: 3572},
+ expr: &actionExpr{
+ pos: position{line: 138, col: 15, offset: 3588},
+ run: (*parser).callonRuleRefExpr1,
+ expr: &seqExpr{
+ pos: position{line: 138, col: 15, offset: 3588},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 138, col: 15, offset: 3588},
+ label: "name",
+ expr: &ruleRefExpr{
+ pos: position{line: 138, col: 20, offset: 3593},
+ name: "IdentifierName",
+ },
+ },
+ ¬Expr{
+ pos: position{line: 138, col: 35, offset: 3608},
+ expr: &seqExpr{
+ pos: position{line: 138, col: 38, offset: 3611},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 138, col: 38, offset: 3611},
+ name: "__",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 138, col: 43, offset: 3616},
+ expr: &seqExpr{
+ pos: position{line: 138, col: 43, offset: 3616},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 138, col: 43, offset: 3616},
+ name: "StringLiteral",
+ },
+ &ruleRefExpr{
+ pos: position{line: 138, col: 57, offset: 3630},
+ name: "__",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 138, col: 63, offset: 3636},
+ name: "RuleDefOp",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SemanticPredExpr",
+ pos: position{line: 143, col: 1, offset: 3752},
+ expr: &actionExpr{
+ pos: position{line: 143, col: 20, offset: 3773},
+ run: (*parser).callonSemanticPredExpr1,
+ expr: &seqExpr{
+ pos: position{line: 143, col: 20, offset: 3773},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 143, col: 20, offset: 3773},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 143, col: 23, offset: 3776},
+ name: "SemanticPredOp",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 143, col: 38, offset: 3791},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 143, col: 41, offset: 3794},
+ label: "code",
+ expr: &ruleRefExpr{
+ pos: position{line: 143, col: 46, offset: 3799},
+ name: "CodeBlock",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SemanticPredOp",
+ pos: position{line: 154, col: 1, offset: 4076},
+ expr: &actionExpr{
+ pos: position{line: 154, col: 18, offset: 4095},
+ run: (*parser).callonSemanticPredOp1,
+ expr: &choiceExpr{
+ pos: position{line: 154, col: 20, offset: 4097},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 154, col: 20, offset: 4097},
+ val: "&",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 154, col: 26, offset: 4103},
+ val: "!",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RuleDefOp",
+ pos: position{line: 158, col: 1, offset: 4145},
+ expr: &choiceExpr{
+ pos: position{line: 158, col: 13, offset: 4159},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 158, col: 13, offset: 4159},
+ val: "=",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 19, offset: 4165},
+ val: "<-",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 26, offset: 4172},
+ val: "←",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 37, offset: 4183},
+ val: "⟵",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "SourceChar",
+ pos: position{line: 160, col: 1, offset: 4193},
+ expr: &anyMatcher{
+ line: 160, col: 14, offset: 4208,
+ },
+ },
+ {
+ name: "Comment",
+ pos: position{line: 161, col: 1, offset: 4210},
+ expr: &choiceExpr{
+ pos: position{line: 161, col: 11, offset: 4222},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 161, col: 11, offset: 4222},
+ name: "MultiLineComment",
+ },
+ &ruleRefExpr{
+ pos: position{line: 161, col: 30, offset: 4241},
+ name: "SingleLineComment",
+ },
+ },
+ },
+ },
+ {
+ name: "MultiLineComment",
+ pos: position{line: 162, col: 1, offset: 4259},
+ expr: &seqExpr{
+ pos: position{line: 162, col: 20, offset: 4280},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 162, col: 20, offset: 4280},
+ val: "/*",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 162, col: 27, offset: 4287},
+ expr: &seqExpr{
+ pos: position{line: 162, col: 27, offset: 4287},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 162, col: 27, offset: 4287},
+ expr: &litMatcher{
+ pos: position{line: 162, col: 28, offset: 4288},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 162, col: 33, offset: 4293},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 162, col: 47, offset: 4307},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "MultiLineCommentNoLineTerminator",
+ pos: position{line: 163, col: 1, offset: 4312},
+ expr: &seqExpr{
+ pos: position{line: 163, col: 36, offset: 4349},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 163, col: 36, offset: 4349},
+ val: "/*",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 163, col: 43, offset: 4356},
+ expr: &seqExpr{
+ pos: position{line: 163, col: 43, offset: 4356},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 163, col: 43, offset: 4356},
+ expr: &choiceExpr{
+ pos: position{line: 163, col: 46, offset: 4359},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 163, col: 46, offset: 4359},
+ val: "*/",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 163, col: 53, offset: 4366},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 163, col: 59, offset: 4372},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 163, col: 73, offset: 4386},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "SingleLineComment",
+ pos: position{line: 164, col: 1, offset: 4391},
+ expr: &seqExpr{
+ pos: position{line: 164, col: 21, offset: 4413},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 164, col: 21, offset: 4413},
+ val: "//",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 164, col: 28, offset: 4420},
+ expr: &seqExpr{
+ pos: position{line: 164, col: 28, offset: 4420},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 164, col: 28, offset: 4420},
+ expr: &ruleRefExpr{
+ pos: position{line: 164, col: 29, offset: 4421},
+ name: "EOL",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 164, col: 33, offset: 4425},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Identifier",
+ pos: position{line: 166, col: 1, offset: 4440},
+ expr: &ruleRefExpr{
+ pos: position{line: 166, col: 14, offset: 4455},
+ name: "IdentifierName",
+ },
+ },
+ {
+ name: "IdentifierName",
+ pos: position{line: 167, col: 1, offset: 4470},
+ expr: &actionExpr{
+ pos: position{line: 167, col: 18, offset: 4489},
+ run: (*parser).callonIdentifierName1,
+ expr: &seqExpr{
+ pos: position{line: 167, col: 18, offset: 4489},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 167, col: 18, offset: 4489},
+ name: "IdentifierStart",
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 167, col: 34, offset: 4505},
+ expr: &ruleRefExpr{
+ pos: position{line: 167, col: 34, offset: 4505},
+ name: "IdentifierPart",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "IdentifierStart",
+ pos: position{line: 170, col: 1, offset: 4587},
+ expr: &charClassMatcher{
+ pos: position{line: 170, col: 19, offset: 4607},
+ val: "[a-z_]i",
+ chars: []rune{'_'},
+ ranges: []rune{'a', 'z'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ {
+ name: "IdentifierPart",
+ pos: position{line: 171, col: 1, offset: 4615},
+ expr: &choiceExpr{
+ pos: position{line: 171, col: 18, offset: 4634},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 171, col: 18, offset: 4634},
+ name: "IdentifierStart",
+ },
+ &charClassMatcher{
+ pos: position{line: 171, col: 36, offset: 4652},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ },
+ {
+ name: "LitMatcher",
+ pos: position{line: 173, col: 1, offset: 4659},
+ expr: &actionExpr{
+ pos: position{line: 173, col: 14, offset: 4674},
+ run: (*parser).callonLitMatcher1,
+ expr: &seqExpr{
+ pos: position{line: 173, col: 14, offset: 4674},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 173, col: 14, offset: 4674},
+ label: "lit",
+ expr: &ruleRefExpr{
+ pos: position{line: 173, col: 18, offset: 4678},
+ name: "StringLiteral",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 173, col: 32, offset: 4692},
+ label: "ignore",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 173, col: 39, offset: 4699},
+ expr: &litMatcher{
+ pos: position{line: 173, col: 39, offset: 4699},
+ val: "i",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "StringLiteral",
+ pos: position{line: 183, col: 1, offset: 4925},
+ expr: &actionExpr{
+ pos: position{line: 183, col: 17, offset: 4943},
+ run: (*parser).callonStringLiteral1,
+ expr: &choiceExpr{
+ pos: position{line: 183, col: 19, offset: 4945},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 183, col: 19, offset: 4945},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 183, col: 19, offset: 4945},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 183, col: 23, offset: 4949},
+ expr: &ruleRefExpr{
+ pos: position{line: 183, col: 23, offset: 4949},
+ name: "DoubleStringChar",
+ },
+ },
+ &litMatcher{
+ pos: position{line: 183, col: 41, offset: 4967},
+ val: "\"",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 183, col: 47, offset: 4973},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 183, col: 47, offset: 4973},
+ val: "'",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 183, col: 51, offset: 4977},
+ name: "SingleStringChar",
+ },
+ &litMatcher{
+ pos: position{line: 183, col: 68, offset: 4994},
+ val: "'",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 183, col: 74, offset: 5000},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 183, col: 74, offset: 5000},
+ val: "`",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 183, col: 78, offset: 5004},
+ name: "RawStringChar",
+ },
+ &litMatcher{
+ pos: position{line: 183, col: 92, offset: 5018},
+ val: "`",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "DoubleStringChar",
+ pos: position{line: 186, col: 1, offset: 5089},
+ expr: &choiceExpr{
+ pos: position{line: 186, col: 20, offset: 5110},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 186, col: 20, offset: 5110},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 186, col: 20, offset: 5110},
+ expr: &choiceExpr{
+ pos: position{line: 186, col: 23, offset: 5113},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 186, col: 23, offset: 5113},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 186, col: 29, offset: 5119},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 186, col: 36, offset: 5126},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 186, col: 42, offset: 5132},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 186, col: 55, offset: 5145},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 186, col: 55, offset: 5145},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 186, col: 60, offset: 5150},
+ name: "DoubleStringEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SingleStringChar",
+ pos: position{line: 187, col: 1, offset: 5169},
+ expr: &choiceExpr{
+ pos: position{line: 187, col: 20, offset: 5190},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 187, col: 20, offset: 5190},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 187, col: 20, offset: 5190},
+ expr: &choiceExpr{
+ pos: position{line: 187, col: 23, offset: 5193},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 187, col: 23, offset: 5193},
+ val: "'",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 187, col: 29, offset: 5199},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 187, col: 36, offset: 5206},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 187, col: 42, offset: 5212},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 187, col: 55, offset: 5225},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 187, col: 55, offset: 5225},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 187, col: 60, offset: 5230},
+ name: "SingleStringEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RawStringChar",
+ pos: position{line: 188, col: 1, offset: 5249},
+ expr: &seqExpr{
+ pos: position{line: 188, col: 17, offset: 5267},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 188, col: 17, offset: 5267},
+ expr: &litMatcher{
+ pos: position{line: 188, col: 18, offset: 5268},
+ val: "`",
+ ignoreCase: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 188, col: 22, offset: 5272},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ {
+ name: "DoubleStringEscape",
+ pos: position{line: 190, col: 1, offset: 5284},
+ expr: &choiceExpr{
+ pos: position{line: 190, col: 22, offset: 5307},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 190, col: 22, offset: 5307},
+ val: "'",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 190, col: 28, offset: 5313},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ },
+ {
+ name: "SingleStringEscape",
+ pos: position{line: 191, col: 1, offset: 5334},
+ expr: &choiceExpr{
+ pos: position{line: 191, col: 22, offset: 5357},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 191, col: 22, offset: 5357},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 191, col: 28, offset: 5363},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ },
+ {
+ name: "CommonEscapeSequence",
+ pos: position{line: 193, col: 1, offset: 5385},
+ expr: &choiceExpr{
+ pos: position{line: 193, col: 24, offset: 5410},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 193, col: 24, offset: 5410},
+ name: "SingleCharEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 193, col: 43, offset: 5429},
+ name: "OctalEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 193, col: 57, offset: 5443},
+ name: "HexEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 193, col: 69, offset: 5455},
+ name: "LongUnicodeEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 193, col: 89, offset: 5475},
+ name: "ShortUnicodeEscape",
+ },
+ },
+ },
+ },
+ {
+ name: "SingleCharEscape",
+ pos: position{line: 194, col: 1, offset: 5494},
+ expr: &choiceExpr{
+ pos: position{line: 194, col: 20, offset: 5515},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 194, col: 20, offset: 5515},
+ val: "a",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 26, offset: 5521},
+ val: "b",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 32, offset: 5527},
+ val: "n",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 38, offset: 5533},
+ val: "f",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 44, offset: 5539},
+ val: "r",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 50, offset: 5545},
+ val: "t",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 56, offset: 5551},
+ val: "v",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 194, col: 62, offset: 5557},
+ val: "\\",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "OctalEscape",
+ pos: position{line: 195, col: 1, offset: 5562},
+ expr: &seqExpr{
+ pos: position{line: 195, col: 15, offset: 5578},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 195, col: 15, offset: 5578},
+ name: "OctalDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 195, col: 26, offset: 5589},
+ name: "OctalDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 195, col: 37, offset: 5600},
+ name: "OctalDigit",
+ },
+ },
+ },
+ },
+ {
+ name: "HexEscape",
+ pos: position{line: 196, col: 1, offset: 5611},
+ expr: &seqExpr{
+ pos: position{line: 196, col: 13, offset: 5625},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 196, col: 13, offset: 5625},
+ val: "x",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 196, col: 17, offset: 5629},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 196, col: 26, offset: 5638},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ {
+ name: "LongUnicodeEscape",
+ pos: position{line: 197, col: 1, offset: 5647},
+ expr: &seqExpr{
+ pos: position{line: 197, col: 21, offset: 5669},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 197, col: 21, offset: 5669},
+ val: "U",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 25, offset: 5673},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 34, offset: 5682},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 43, offset: 5691},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 52, offset: 5700},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 61, offset: 5709},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 70, offset: 5718},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 79, offset: 5727},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 197, col: 88, offset: 5736},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ {
+ name: "ShortUnicodeEscape",
+ pos: position{line: 198, col: 1, offset: 5745},
+ expr: &seqExpr{
+ pos: position{line: 198, col: 22, offset: 5768},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 198, col: 22, offset: 5768},
+ val: "u",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 198, col: 26, offset: 5772},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 198, col: 35, offset: 5781},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 198, col: 44, offset: 5790},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 198, col: 53, offset: 5799},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ {
+ name: "OctalDigit",
+ pos: position{line: 200, col: 1, offset: 5809},
+ expr: &charClassMatcher{
+ pos: position{line: 200, col: 14, offset: 5824},
+ val: "[0-7]",
+ ranges: []rune{'0', '7'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "DecimalDigit",
+ pos: position{line: 201, col: 1, offset: 5830},
+ expr: &charClassMatcher{
+ pos: position{line: 201, col: 16, offset: 5847},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "HexDigit",
+ pos: position{line: 202, col: 1, offset: 5853},
+ expr: &charClassMatcher{
+ pos: position{line: 202, col: 12, offset: 5866},
+ val: "[0-9a-f]i",
+ ranges: []rune{'0', '9', 'a', 'f'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ {
+ name: "CharClassMatcher",
+ pos: position{line: 204, col: 1, offset: 5877},
+ expr: &actionExpr{
+ pos: position{line: 204, col: 20, offset: 5898},
+ run: (*parser).callonCharClassMatcher1,
+ expr: &seqExpr{
+ pos: position{line: 204, col: 20, offset: 5898},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 204, col: 20, offset: 5898},
+ val: "[",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 204, col: 26, offset: 5904},
+ expr: &choiceExpr{
+ pos: position{line: 204, col: 26, offset: 5904},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 204, col: 26, offset: 5904},
+ name: "ClassCharRange",
+ },
+ &ruleRefExpr{
+ pos: position{line: 204, col: 43, offset: 5921},
+ name: "ClassChar",
+ },
+ &seqExpr{
+ pos: position{line: 204, col: 55, offset: 5933},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 204, col: 55, offset: 5933},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 204, col: 60, offset: 5938},
+ name: "UnicodeClassEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 204, col: 82, offset: 5960},
+ val: "]",
+ ignoreCase: false,
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 204, col: 86, offset: 5964},
+ expr: &litMatcher{
+ pos: position{line: 204, col: 86, offset: 5964},
+ val: "i",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ClassCharRange",
+ pos: position{line: 209, col: 1, offset: 6069},
+ expr: &seqExpr{
+ pos: position{line: 209, col: 18, offset: 6088},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 209, col: 18, offset: 6088},
+ name: "ClassChar",
+ },
+ &litMatcher{
+ pos: position{line: 209, col: 28, offset: 6098},
+ val: "-",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 209, col: 32, offset: 6102},
+ name: "ClassChar",
+ },
+ },
+ },
+ },
+ {
+ name: "ClassChar",
+ pos: position{line: 210, col: 1, offset: 6112},
+ expr: &choiceExpr{
+ pos: position{line: 210, col: 13, offset: 6126},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 210, col: 13, offset: 6126},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 210, col: 13, offset: 6126},
+ expr: &choiceExpr{
+ pos: position{line: 210, col: 16, offset: 6129},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 210, col: 16, offset: 6129},
+ val: "]",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 210, col: 22, offset: 6135},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 210, col: 29, offset: 6142},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 210, col: 35, offset: 6148},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 210, col: 48, offset: 6161},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 210, col: 48, offset: 6161},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 210, col: 53, offset: 6166},
+ name: "CharClassEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "CharClassEscape",
+ pos: position{line: 211, col: 1, offset: 6182},
+ expr: &choiceExpr{
+ pos: position{line: 211, col: 19, offset: 6202},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 211, col: 19, offset: 6202},
+ val: "]",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 211, col: 25, offset: 6208},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ },
+ {
+ name: "UnicodeClassEscape",
+ pos: position{line: 213, col: 1, offset: 6230},
+ expr: &seqExpr{
+ pos: position{line: 213, col: 22, offset: 6253},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 213, col: 22, offset: 6253},
+ val: "p",
+ ignoreCase: false,
+ },
+ &choiceExpr{
+ pos: position{line: 213, col: 28, offset: 6259},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 213, col: 28, offset: 6259},
+ name: "SingleCharUnicodeClass",
+ },
+ &seqExpr{
+ pos: position{line: 213, col: 53, offset: 6284},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 213, col: 53, offset: 6284},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 213, col: 57, offset: 6288},
+ name: "UnicodeClass",
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 70, offset: 6301},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SingleCharUnicodeClass",
+ pos: position{line: 214, col: 1, offset: 6307},
+ expr: &charClassMatcher{
+ pos: position{line: 214, col: 26, offset: 6334},
+ val: "[LMNCPZS]",
+ chars: []rune{'L', 'M', 'N', 'C', 'P', 'Z', 'S'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "UnicodeClass",
+ pos: position{line: 215, col: 1, offset: 6344},
+ expr: &oneOrMoreExpr{
+ pos: position{line: 215, col: 16, offset: 6361},
+ expr: &charClassMatcher{
+ pos: position{line: 215, col: 16, offset: 6361},
+ val: "[a-z_]i",
+ chars: []rune{'_'},
+ ranges: []rune{'a', 'z'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "AnyMatcher",
+ pos: position{line: 217, col: 1, offset: 6371},
+ expr: &actionExpr{
+ pos: position{line: 217, col: 14, offset: 6386},
+ run: (*parser).callonAnyMatcher1,
+ expr: &litMatcher{
+ pos: position{line: 217, col: 14, offset: 6386},
+ val: ".",
+ ignoreCase: false,
+ },
+ },
+ },
+ {
+ name: "CodeBlock",
+ pos: position{line: 222, col: 1, offset: 6461},
+ expr: &actionExpr{
+ pos: position{line: 222, col: 13, offset: 6475},
+ run: (*parser).callonCodeBlock1,
+ expr: &seqExpr{
+ pos: position{line: 222, col: 13, offset: 6475},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 222, col: 13, offset: 6475},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 222, col: 17, offset: 6479},
+ name: "Code",
+ },
+ &litMatcher{
+ pos: position{line: 222, col: 22, offset: 6484},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Code",
+ pos: position{line: 228, col: 1, offset: 6582},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 228, col: 10, offset: 6593},
+ expr: &choiceExpr{
+ pos: position{line: 228, col: 10, offset: 6593},
+ alternatives: []interface{}{
+ &oneOrMoreExpr{
+ pos: position{line: 228, col: 12, offset: 6595},
+ expr: &seqExpr{
+ pos: position{line: 228, col: 12, offset: 6595},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 228, col: 12, offset: 6595},
+ expr: &charClassMatcher{
+ pos: position{line: 228, col: 13, offset: 6596},
+ val: "[{}]",
+ chars: []rune{'{', '}'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 228, col: 18, offset: 6601},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 228, col: 34, offset: 6617},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 228, col: 34, offset: 6617},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 228, col: 38, offset: 6621},
+ name: "Code",
+ },
+ &litMatcher{
+ pos: position{line: 228, col: 43, offset: 6626},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "__",
+ pos: position{line: 230, col: 1, offset: 6634},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 230, col: 8, offset: 6643},
+ expr: &choiceExpr{
+ pos: position{line: 230, col: 8, offset: 6643},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 230, col: 8, offset: 6643},
+ name: "Whitespace",
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 21, offset: 6656},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 27, offset: 6662},
+ name: "Comment",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "_",
+ pos: position{line: 231, col: 1, offset: 6673},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 231, col: 7, offset: 6681},
+ expr: &choiceExpr{
+ pos: position{line: 231, col: 7, offset: 6681},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 231, col: 7, offset: 6681},
+ name: "Whitespace",
+ },
+ &ruleRefExpr{
+ pos: position{line: 231, col: 20, offset: 6694},
+ name: "MultiLineCommentNoLineTerminator",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Whitespace",
+ pos: position{line: 233, col: 1, offset: 6731},
+ expr: &charClassMatcher{
+ pos: position{line: 233, col: 14, offset: 6746},
+ val: "[ \\t\\r]",
+ chars: []rune{' ', '\t', '\r'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "EOL",
+ pos: position{line: 234, col: 1, offset: 6754},
+ expr: &litMatcher{
+ pos: position{line: 234, col: 7, offset: 6762},
+ val: "\n",
+ ignoreCase: false,
+ },
+ },
+ {
+ name: "EOS",
+ pos: position{line: 235, col: 1, offset: 6767},
+ expr: &choiceExpr{
+ pos: position{line: 235, col: 7, offset: 6775},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 235, col: 7, offset: 6775},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 235, col: 7, offset: 6775},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 235, col: 10, offset: 6778},
+ val: ";",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 235, col: 16, offset: 6784},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 235, col: 16, offset: 6784},
+ name: "_",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 235, col: 18, offset: 6786},
+ expr: &ruleRefExpr{
+ pos: position{line: 235, col: 18, offset: 6786},
+ name: "SingleLineComment",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 235, col: 37, offset: 6805},
+ name: "EOL",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 235, col: 43, offset: 6811},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 235, col: 43, offset: 6811},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 235, col: 46, offset: 6814},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 237, col: 1, offset: 6819},
+ expr: ¬Expr{
+ pos: position{line: 237, col: 7, offset: 6827},
+ expr: &anyMatcher{
+ line: 237, col: 8, offset: 6828,
+ },
+ },
+ },
+ },
+}
+
+func (c *current) onGrammar1(initializer, rules interface{}) (interface{}, error) {
+ pos := c.astPos()
+
+ // create the grammar, assign its initializer
+ g := ast.NewGrammar(pos)
+ initSlice := toIfaceSlice(initializer)
+ if len(initSlice) > 0 {
+ g.Init = initSlice[0].(*ast.CodeBlock)
+ }
+
+ rulesSlice := toIfaceSlice(rules)
+ g.Rules = make([]*ast.Rule, len(rulesSlice))
+ for i, duo := range rulesSlice {
+ g.Rules[i] = duo.([]interface{})[0].(*ast.Rule)
+ }
+
+ return g, nil
+}
+
+func (p *parser) callonGrammar1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onGrammar1(stack["initializer"], stack["rules"])
+}
+
+func (c *current) onInitializer1(code interface{}) (interface{}, error) {
+ return code, nil
+}
+
+func (p *parser) callonInitializer1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onInitializer1(stack["code"])
+}
+
+func (c *current) onRule1(name, display, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+
+ rule := ast.NewRule(pos, name.(*ast.Identifier))
+ displaySlice := toIfaceSlice(display)
+ if len(displaySlice) > 0 {
+ rule.DisplayName = displaySlice[0].(*ast.StringLit)
+ }
+ rule.Expr = expr.(ast.Expression)
+
+ return rule, nil
+}
+
+func (p *parser) callonRule1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onRule1(stack["name"], stack["display"], stack["expr"])
+}
+
+func (c *current) onChoiceExpr1(first, rest interface{}) (interface{}, error) {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+
+ pos := c.astPos()
+ choice := ast.NewChoiceExpr(pos)
+ choice.Alternatives = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ choice.Alternatives = append(choice.Alternatives, sl.([]interface{})[3].(ast.Expression))
+ }
+ return choice, nil
+}
+
+func (p *parser) callonChoiceExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onChoiceExpr1(stack["first"], stack["rest"])
+}
+
+func (c *current) onActionExpr1(expr, code interface{}) (interface{}, error) {
+ if code == nil {
+ return expr, nil
+ }
+
+ pos := c.astPos()
+ act := ast.NewActionExpr(pos)
+ act.Expr = expr.(ast.Expression)
+ codeSlice := toIfaceSlice(code)
+ act.Code = codeSlice[1].(*ast.CodeBlock)
+
+ return act, nil
+}
+
+func (p *parser) callonActionExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onActionExpr1(stack["expr"], stack["code"])
+}
+
+func (c *current) onSeqExpr1(first, rest interface{}) (interface{}, error) {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+ seq := ast.NewSeqExpr(c.astPos())
+ seq.Exprs = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ seq.Exprs = append(seq.Exprs, sl.([]interface{})[1].(ast.Expression))
+ }
+ return seq, nil
+}
+
+func (p *parser) callonSeqExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSeqExpr1(stack["first"], stack["rest"])
+}
+
+func (c *current) onLabeledExpr2(label, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+ lab := ast.NewLabeledExpr(pos)
+ lab.Label = label.(*ast.Identifier)
+ lab.Expr = expr.(ast.Expression)
+ return lab, nil
+}
+
+func (p *parser) callonLabeledExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onLabeledExpr2(stack["label"], stack["expr"])
+}
+
+func (c *current) onPrefixedExpr2(op, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndExpr(pos)
+ and.Expr = expr.(ast.Expression)
+ return and, nil
+ }
+ not := ast.NewNotExpr(pos)
+ not.Expr = expr.(ast.Expression)
+ return not, nil
+}
+
+func (p *parser) callonPrefixedExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrefixedExpr2(stack["op"], stack["expr"])
+}
+
+func (c *current) onPrefixedOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonPrefixedOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrefixedOp1()
+}
+
+func (c *current) onSuffixedExpr2(expr, op interface{}) (interface{}, error) {
+ pos := c.astPos()
+ opStr := op.(string)
+ switch opStr {
+ case "?":
+ zero := ast.NewZeroOrOneExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "*":
+ zero := ast.NewZeroOrMoreExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "+":
+ one := ast.NewOneOrMoreExpr(pos)
+ one.Expr = expr.(ast.Expression)
+ return one, nil
+ default:
+ return nil, errors.New("unknown operator: " + opStr)
+ }
+}
+
+func (p *parser) callonSuffixedExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSuffixedExpr2(stack["expr"], stack["op"])
+}
+
+func (c *current) onSuffixedOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonSuffixedOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSuffixedOp1()
+}
+
+func (c *current) onPrimaryExpr7(expr interface{}) (interface{}, error) {
+ return expr, nil
+}
+
+func (p *parser) callonPrimaryExpr7() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrimaryExpr7(stack["expr"])
+}
+
+func (c *current) onRuleRefExpr1(name interface{}) (interface{}, error) {
+ ref := ast.NewRuleRefExpr(c.astPos())
+ ref.Name = name.(*ast.Identifier)
+ return ref, nil
+}
+
+func (p *parser) callonRuleRefExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onRuleRefExpr1(stack["name"])
+}
+
+func (c *current) onSemanticPredExpr1(op, code interface{}) (interface{}, error) {
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndCodeExpr(c.astPos())
+ and.Code = code.(*ast.CodeBlock)
+ return and, nil
+ }
+ not := ast.NewNotCodeExpr(c.astPos())
+ not.Code = code.(*ast.CodeBlock)
+ return not, nil
+}
+
+func (p *parser) callonSemanticPredExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSemanticPredExpr1(stack["op"], stack["code"])
+}
+
+func (c *current) onSemanticPredOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonSemanticPredOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSemanticPredOp1()
+}
+
+func (c *current) onIdentifierName1() (interface{}, error) {
+ return ast.NewIdentifier(c.astPos(), string(c.text)), nil
+}
+
+func (p *parser) callonIdentifierName1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onIdentifierName1()
+}
+
+func (c *current) onLitMatcher1(lit, ignore interface{}) (interface{}, error) {
+ rawStr := lit.(*ast.StringLit).Val
+ s, err := strconv.Unquote(rawStr)
+ if err != nil {
+ return nil, err
+ }
+ m := ast.NewLitMatcher(c.astPos(), s)
+ m.IgnoreCase = ignore != nil
+ return m, nil
+}
+
+func (p *parser) callonLitMatcher1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onLitMatcher1(stack["lit"], stack["ignore"])
+}
+
+func (c *current) onStringLiteral1() (interface{}, error) {
+ return ast.NewStringLit(c.astPos(), string(c.text)), nil
+}
+
+func (p *parser) callonStringLiteral1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onStringLiteral1()
+}
+
+func (c *current) onCharClassMatcher1() (interface{}, error) {
+ pos := c.astPos()
+ cc := ast.NewCharClassMatcher(pos, string(c.text))
+ return cc, nil
+}
+
+func (p *parser) callonCharClassMatcher1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onCharClassMatcher1()
+}
+
+func (c *current) onAnyMatcher1() (interface{}, error) {
+ any := ast.NewAnyMatcher(c.astPos(), ".")
+ return any, nil
+}
+
+func (p *parser) callonAnyMatcher1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onAnyMatcher1()
+}
+
+func (c *current) onCodeBlock1() (interface{}, error) {
+ pos := c.astPos()
+ cb := ast.NewCodeBlock(pos, string(c.text))
+ return cb, nil
+}
+
+func (p *parser) callonCodeBlock1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onCodeBlock1()
+}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+func (p position) String() string {
+ return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune
+ w int
+}
+
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
+// the AST types...
+
+type grammar struct {
+ pos position
+ rules []*rule
+}
+
+type rule struct {
+ pos position
+ name string
+ displayName string
+ expr interface{}
+}
+
+type choiceExpr struct {
+ pos position
+ alternatives []interface{}
+}
+
+type actionExpr struct {
+ pos position
+ expr interface{}
+ run func(*parser) (interface{}, error)
+}
+
+type seqExpr struct {
+ pos position
+ exprs []interface{}
+}
+
+type labeledExpr struct {
+ pos position
+ label string
+ expr interface{}
+}
+
+type expr struct {
+ pos position
+ expr interface{}
+}
+
+type andExpr expr
+type notExpr expr
+type zeroOrOneExpr expr
+type zeroOrMoreExpr expr
+type oneOrMoreExpr expr
+
+type ruleRefExpr struct {
+ pos position
+ name string
+}
+
+type andCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type notCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type litMatcher struct {
+ pos position
+ val string
+ ignoreCase bool
+}
+
+type charClassMatcher struct {
+ pos position
+ val string
+ chars []rune
+ ranges []rune
+ classes []*unicode.RangeTable
+ ignoreCase bool
+ inverted bool
+}
+
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+func (e *errList) add(err error) {
+ *e = append(*e, err)
+}
+
+func (e errList) err() error {
+ if len(e) == 0 {
+ return nil
+ }
+ e.dedupe()
+ return e
+}
+
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+ Inner error
+ pos position
+ prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+ return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+func newParser(filename string, b []byte, opts ...Option) *parser {
+ p := &parser{
+ filename: filename,
+ errs: new(errList),
+ data: b,
+ pt: savepoint{position: position{line: 1}},
+ recover: true,
+ }
+ p.setOptions(opts)
+ return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
+type resultTuple struct {
+ v interface{}
+ b bool
+ end savepoint
+}
+
+type parser struct {
+ filename string
+ pt savepoint
+ cur current
+
+ data []byte
+ errs *errList
+
+ recover bool
+ debug bool
+ depth int
+
+ memoize bool
+ // memoization table for the packrat algorithm:
+ // map[offset in source] map[expression or rule] {value, match}
+ memo map[int]map[interface{}]resultTuple
+
+ // rules table, maps the rule identifier to the rule node
+ rules map[string]*rule
+ // variables stack, map of label to value
+ vstack []map[string]interface{}
+ // rule stack, allows identification of the current rule in errors
+ rstack []*rule
+
+ // stats
+ exprCnt int
+}
+
+// push a variable set on the vstack.
+func (p *parser) pushV() {
+ if cap(p.vstack) == len(p.vstack) {
+ // create new empty slot in the stack
+ p.vstack = append(p.vstack, nil)
+ } else {
+ // slice to 1 more
+ p.vstack = p.vstack[:len(p.vstack)+1]
+ }
+
+ // get the last args set
+ m := p.vstack[len(p.vstack)-1]
+ if m != nil && len(m) == 0 {
+ // empty map, all good
+ return
+ }
+
+ m = make(map[string]interface{})
+ p.vstack[len(p.vstack)-1] = m
+}
+
+// pop a variable set from the vstack.
+func (p *parser) popV() {
+ // if the map is not empty, clear it
+ m := p.vstack[len(p.vstack)-1]
+ if len(m) > 0 {
+ // GC that map
+ p.vstack[len(p.vstack)-1] = nil
+ }
+ p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+func (p *parser) print(prefix, s string) string {
+ if !p.debug {
+ return s
+ }
+
+ fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
+ prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+ return s
+}
+
+func (p *parser) in(s string) string {
+ p.depth++
+ return p.print(strings.Repeat(" ", p.depth)+">", s)
+}
+
+func (p *parser) out(s string) string {
+ p.depth--
+ return p.print(strings.Repeat(" ", p.depth)+"<", s)
+}
+
+func (p *parser) addErr(err error) {
+ p.addErrAt(err, p.pt.position)
+}
+
+func (p *parser) addErrAt(err error, pos position) {
+ var buf bytes.Buffer
+ if p.filename != "" {
+ buf.WriteString(p.filename)
+ }
+ if buf.Len() > 0 {
+ buf.WriteString(":")
+ }
+ buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
+ if len(p.rstack) > 0 {
+ if buf.Len() > 0 {
+ buf.WriteString(": ")
+ }
+ rule := p.rstack[len(p.rstack)-1]
+ if rule.displayName != "" {
+ buf.WriteString("rule " + rule.displayName)
+ } else {
+ buf.WriteString("rule " + rule.name)
+ }
+ }
+ pe := &parserError{Inner: err, prefix: buf.String()}
+ p.errs.add(pe)
+}
+
+// read advances the parser to the next rune.
+func (p *parser) read() {
+ p.pt.offset += p.pt.w
+ rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+ p.pt.rn = rn
+ p.pt.w = n
+ p.pt.col++
+ if rn == '\n' {
+ p.pt.line++
+ p.pt.col = 0
+ }
+
+ if rn == utf8.RuneError {
+ if n > 0 {
+ p.addErr(errInvalidEncoding)
+ }
+ }
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+ if p.debug {
+ defer p.out(p.in("restore"))
+ }
+ if pt.offset == p.pt.offset {
+ return
+ }
+ p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+ return p.data[start.position.offset:p.pt.position.offset]
+}
+
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+ if p.memo == nil {
+ p.memo = make(map[int]map[interface{}]resultTuple)
+ }
+ m := p.memo[pt.offset]
+ if m == nil {
+ m = make(map[interface{}]resultTuple)
+ p.memo[pt.offset] = m
+ }
+ m[node] = tuple
+}
+
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+ if len(g.rules) == 0 {
+ p.addErr(errNoRule)
+ return nil, p.errs.err()
+ }
+
+ // TODO : not super critical but this could be generated
+ p.buildRulesTable(g)
+
+ if p.recover {
+ // panic can be used in action code to stop parsing immediately
+ // and return the panic as an error.
+ defer func() {
+ if e := recover(); e != nil {
+ if p.debug {
+ defer p.out(p.in("panic handler"))
+ }
+ val = nil
+ switch e := e.(type) {
+ case error:
+ p.addErr(e)
+ default:
+ p.addErr(fmt.Errorf("%v", e))
+ }
+ err = p.errs.err()
+ }
+ }()
+ }
+
+ // start rule is rule [0]
+ p.read() // advance to first rune
+ val, ok := p.parseRule(g.rules[0])
+ if !ok {
+ if len(*p.errs) == 0 {
+ // make sure this doesn't go out silently
+ p.addErr(errNoMatch)
+ }
+ return nil, p.errs.err()
+ }
+ return val, p.errs.err()
+}
+
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRule " + rule.name))
+ }
+
+ if p.memoize {
+ res, ok := p.getMemoized(rule)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ }
+
+ start := p.pt
+ p.rstack = append(p.rstack, rule)
+ p.pushV()
+ val, ok := p.parseExpr(rule.expr)
+ p.popV()
+ p.rstack = p.rstack[:len(p.rstack)-1]
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+
+ if p.memoize {
+ p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+ var pt savepoint
+ var ok bool
+
+ if p.memoize {
+ res, ok := p.getMemoized(expr)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ pt = p.pt
+ }
+
+ p.exprCnt++
+ var val interface{}
+ switch expr := expr.(type) {
+ case *actionExpr:
+ val, ok = p.parseActionExpr(expr)
+ case *andCodeExpr:
+ val, ok = p.parseAndCodeExpr(expr)
+ case *andExpr:
+ val, ok = p.parseAndExpr(expr)
+ case *anyMatcher:
+ val, ok = p.parseAnyMatcher(expr)
+ case *charClassMatcher:
+ val, ok = p.parseCharClassMatcher(expr)
+ case *choiceExpr:
+ val, ok = p.parseChoiceExpr(expr)
+ case *labeledExpr:
+ val, ok = p.parseLabeledExpr(expr)
+ case *litMatcher:
+ val, ok = p.parseLitMatcher(expr)
+ case *notCodeExpr:
+ val, ok = p.parseNotCodeExpr(expr)
+ case *notExpr:
+ val, ok = p.parseNotExpr(expr)
+ case *oneOrMoreExpr:
+ val, ok = p.parseOneOrMoreExpr(expr)
+ case *ruleRefExpr:
+ val, ok = p.parseRuleRefExpr(expr)
+ case *seqExpr:
+ val, ok = p.parseSeqExpr(expr)
+ case *zeroOrMoreExpr:
+ val, ok = p.parseZeroOrMoreExpr(expr)
+ case *zeroOrOneExpr:
+ val, ok = p.parseZeroOrOneExpr(expr)
+ default:
+ panic(fmt.Sprintf("unknown expression type %T", expr))
+ }
+ if p.memoize {
+ p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseActionExpr"))
+ }
+
+ start := p.pt
+ val, ok := p.parseExpr(act.expr)
+ if ok {
+ p.cur.pos = start.position
+ p.cur.text = p.sliceFrom(start)
+ actVal, err := act.run(p)
+ if err != nil {
+ p.addErrAt(err, start.position)
+ }
+ val = actVal
+ }
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+ return val, ok
+}
+
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndCodeExpr"))
+ }
+
+ ok, err := and.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, ok
+}
+
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(and.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, ok
+}
+
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseCharClassMatcher"))
+ }
+
+ cur := p.pt.rn
+ // can't match EOF
+ if cur == utf8.RuneError {
+ return nil, false
+ }
+ start := p.pt
+ if chr.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+
+ // try to match in the list of available chars
+ for _, rn := range chr.chars {
+ if rn == cur {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of ranges
+ for i := 0; i < len(chr.ranges); i += 2 {
+ if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of Unicode classes
+ for _, cl := range chr.classes {
+ if unicode.Is(cl, cur) {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ if chr.inverted {
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLabeledExpr"))
+ }
+
+ p.pushV()
+ val, ok := p.parseExpr(lab.expr)
+ p.popV()
+ if ok && lab.label != "" {
+ m := p.vstack[len(p.vstack)-1]
+ m[lab.label] = val
+ }
+ return val, ok
+}
+
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLitMatcher"))
+ }
+
+ start := p.pt
+ for _, want := range lit.val {
+ cur := p.pt.rn
+ if lit.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+ if cur != want {
+ p.restore(start)
+ return nil, false
+ }
+ p.read()
+ }
+ return p.sliceFrom(start), true
+}
+
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotCodeExpr"))
+ }
+
+ ok, err := not.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, !ok
+}
+
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(not.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, !ok
+}
+
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseOneOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ if len(vals) == 0 {
+ // did not match once, no match
+ return nil, false
+ }
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRuleRefExpr " + ref.name))
+ }
+
+ if ref.name == "" {
+ panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+ }
+
+ rule := p.rules[ref.name]
+ if rule == nil {
+ p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+ return nil, false
+ }
+ return p.parseRule(rule)
+}
+
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseSeqExpr"))
+ }
+
+ var vals []interface{}
+
+ pt := p.pt
+ for _, expr := range seq.exprs {
+ val, ok := p.parseExpr(expr)
+ if !ok {
+ p.restore(pt)
+ return nil, false
+ }
+ vals = append(vals, val)
+ }
+ return vals, true
+}
+
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/main.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/main.go
new file mode 100644
index 0000000000..e6260e45ee
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/bootstrap-pigeon/main.go
@@ -0,0 +1,76 @@
+// Command bootstrap-pigeon generates a PEG parser from a PEG grammar
+// to bootstrap the pigeon command-line tool, as it is built using
+// a simplified bootstrapping grammar that understands just enough of the
+// pigeon grammar to parse it and build the tool.
+package main
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+ "github.com/PuerkitoBio/pigeon/builder"
+)
+
func main() {
	// Flags: -debug traces the parse, -x stops after parsing (no code
	// generation), -o selects the output file (stdout by default).
	dbgFlag := flag.Bool("debug", false, "set debug mode")
	noBuildFlag := flag.Bool("x", false, "do not build, only parse")
	outputFlag := flag.String("o", "", "output file, defaults to stdout")
	flag.Parse()

	// At most one positional argument: the grammar file.
	if flag.NArg() > 1 {
		fmt.Fprintf(os.Stderr, "USAGE: %s [options] [FILE]\n", os.Args[0])
		os.Exit(1)
	}

	// Read the grammar from the named file, or from stdin when absent.
	nm := "stdin"
	inf := os.Stdin
	if flag.NArg() == 1 {
		f, err := os.Open(flag.Arg(0))
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(2)
		}
		defer f.Close()
		inf = f
		nm = flag.Arg(0)
	}
	in := bufio.NewReader(inf)

	// Parse the grammar into its AST. Exit codes distinguish the
	// failure stage: 3 = parse error, 4 = output file, 5 = build.
	g, err := ParseReader(nm, in, Debug(*dbgFlag))
	if err != nil {
		fmt.Fprintln(os.Stderr, "parse error: ", err)
		os.Exit(3)
	}

	if !*noBuildFlag {
		// Generate the parser source from the AST.
		outw := os.Stdout
		if *outputFlag != "" {
			f, err := os.Create(*outputFlag)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				os.Exit(4)
			}
			defer f.Close()
			outw = f
		}

		if err := builder.BuildParser(outw, g.(*ast.Grammar)); err != nil {
			fmt.Fprintln(os.Stderr, "build error: ", err)
			os.Exit(5)
		}
	}
}
+
+func (c *current) astPos() ast.Pos {
+ return ast.Pos{Line: c.pos.line, Col: c.pos.col, Off: c.pos.offset}
+}
+
// toIfaceSlice converts a parse result to a slice of results; a nil
// value stays nil instead of being type-asserted.
func toIfaceSlice(v interface{}) []interface{} {
	if v != nil {
		return v.([]interface{})
	}
	return nil
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegparse/main.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegparse/main.go
new file mode 100644
index 0000000000..8a2fce7451
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegparse/main.go
@@ -0,0 +1,41 @@
+// Command pegparse is a helper command-line tool to test the bootstrap
+// parser.
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "log"
+ "os"
+
+ "github.com/PuerkitoBio/pigeon/bootstrap"
+)
+
func main() {
	// At most one positional argument: the grammar file to parse.
	if len(os.Args) > 2 {
		fmt.Fprintln(os.Stderr, "USAGE: pegparse FILE")
		os.Exit(1)
	}

	var in io.Reader

	// Read from the named file, or from (buffered) stdin when absent.
	nm := "stdin"
	if len(os.Args) == 2 {
		f, err := os.Open(os.Args[1])
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(2)
		}
		defer f.Close()
		in = f
		nm = os.Args[1]
	} else {
		in = bufio.NewReader(os.Stdin)
	}

	// Only the error matters here: the tool checks that the grammar
	// parses, it does not use the resulting AST.
	p := bootstrap.NewParser()
	if _, err := p.Parse(nm, in); err != nil {
		log.Fatal(err)
	}
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegscan/main.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegscan/main.go
new file mode 100644
index 0000000000..6dd2638d77
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/cmd/pegscan/main.go
@@ -0,0 +1,45 @@
+// Command pegscan is a helper command-line tool to test the bootstrap
+// scanner.
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/PuerkitoBio/pigeon/bootstrap"
+)
+
func main() {
	// At most one positional argument: the file to scan.
	if len(os.Args) > 2 {
		fmt.Fprintln(os.Stderr, "USAGE: pegscan FILE")
		os.Exit(1)
	}

	var in io.Reader

	// Read from the named file, or from (buffered) stdin when absent.
	nm := "stdin"
	if len(os.Args) == 2 {
		f, err := os.Open(os.Args[1])
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(2)
		}
		defer f.Close()
		in = f
		nm = os.Args[1]
	} else {
		in = bufio.NewReader(os.Stdin)
	}

	// Print every token until the scanner reports EOF (ok == false).
	var s bootstrap.Scanner
	s.Init(nm, in, nil)
	for {
		tok, ok := s.Scan()
		fmt.Println(tok)
		if !ok {
			break
		}
	}
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/doc.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/doc.go
new file mode 100644
index 0000000000..d36ddb1175
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/doc.go
@@ -0,0 +1,7 @@
+// Package bootstrap implements the scanner and parser to bootstrap the
+// PEG parser generator.
+//
+// It parses the PEG grammar into an ast that is then used to generate
+// a parser generator based on this PEG grammar. The generated parser
+// can then parse the grammar again, without the bootstrap package.
+package bootstrap
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser.go
new file mode 100644
index 0000000000..381809832a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser.go
@@ -0,0 +1,438 @@
+package bootstrap
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+type errList []error
+
+func (e *errList) reset() {
+ *e = (*e)[:0]
+}
+
+func (e *errList) add(p ast.Pos, err error) {
+ *e = append(*e, fmt.Errorf("%s: %v", p, err))
+}
+
+func (e *errList) err() error {
+ if len(*e) == 0 {
+ return nil
+ }
+ return e
+}
+
+func (e *errList) Error() string {
+ switch len(*e) {
+ case 0:
+ return ""
+ case 1:
+ return (*e)[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range *e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
// Parser holds the state to parse the PEG grammar into
// an abstract syntax tree (AST).
type Parser struct {
	s   Scanner // token source
	tok Token   // current token

	errs *errList // accumulated parse errors
	dbg  bool     // when true, in/out print rule entry/exit traces
	pk   Token    // one token of lookahead; a zero pos.Line means "empty"
}
+
+func (p *Parser) in(s string) string {
+ if p.dbg {
+ fmt.Println("IN "+s, p.tok.id, p.tok.lit)
+ }
+ return s
+}
+
+func (p *Parser) out(s string) {
+ if p.dbg {
+ fmt.Println("OUT "+s, p.tok.id, p.tok.lit)
+ }
+}
+
+// NewParser creates a new Parser.
+func NewParser() *Parser {
+ return &Parser{errs: new(errList)}
+}
+
// Parse parses the data from the reader r and generates the AST
// or returns an error if it fails. The filename is used as information
// in the error messages.
func (p *Parser) Parse(filename string, r io.Reader) (*ast.Grammar, error) {
	// The parser is reusable: clear errors from any previous run and
	// point the scanner at the new source, routing scan errors into
	// the shared error list.
	p.errs.reset()
	p.s.Init(filename, r, p.errs.add)

	g := p.grammar()
	return g, p.errs.err()
}
+
// read advances to the next token, consuming the pending lookahead
// token from peek first when one is buffered.
func (p *Parser) read() {
	// A zero pos.Line marks the lookahead slot as empty — the scanner
	// reports 1-based lines, so 0 never occurs for a real token.
	if p.pk.pos.Line != 0 {
		p.tok = p.pk
		p.pk = Token{}
		return
	}
	tok, _ := p.s.Scan()
	p.tok = tok
}
+
// peek returns the next token without consuming the current one; the
// token is cached so the next read picks it up.
func (p *Parser) peek() Token {
	if p.pk.pos.Line == 0 {
		p.pk, _ = p.s.Scan()
	}
	return p.pk
}
+
+func (p *Parser) skip(ids ...tid) {
+outer:
+ for {
+ for _, id := range ids {
+ if p.tok.id == id {
+ p.read()
+ continue outer
+ }
+ }
+ return
+ }
+}
+
// grammar parses: Grammar = [ code ] [ RuleList ] .
func (p *Parser) grammar() *ast.Grammar {
	defer p.out(p.in("grammar"))

	// advance to the first token
	p.read()
	g := ast.NewGrammar(p.tok.pos)

	// Optional initializer code block before the rules.
	p.skip(eol, semicolon)
	if p.tok.id == code {
		g.Init = ast.NewCodeBlock(p.tok.pos, p.tok.lit)
		p.read()
		p.skip(eol, semicolon)
	}

	// Collect rules until EOF. A nil rule means a parse error that was
	// already recorded; skip ahead and keep going to report as many
	// errors as possible in one run.
	for {
		if p.tok.id == eof {
			return g
		}
		r := p.rule()
		if r != nil {
			g.Rules = append(g.Rules, r)
		}
		p.read()
		p.skip(eol, semicolon)
	}
}
+
+func (p *Parser) expect(ids ...tid) bool {
+ if len(ids) == 0 {
+ return true
+ }
+
+ for _, id := range ids {
+ if p.tok.id == id {
+ return true
+ }
+ }
+ if len(ids) == 1 {
+ p.errs.add(p.tok.pos, fmt.Errorf("expected %s, got %s", ids[0], p.tok.id))
+ } else {
+ p.errs.add(p.tok.pos, fmt.Errorf("expected any of %v, got %s", ids, p.tok.id))
+ }
+ return false
+}
+
// rule parses: Rule = ident [ str | rstr | char ] ruledef Expression
// ( eol | eof | semicolon ) . It returns nil after recording an error.
func (p *Parser) rule() *ast.Rule {
	defer p.out(p.in("rule"))

	if !p.expect(ident) {
		return nil
	}
	r := ast.NewRule(p.tok.pos, ast.NewIdentifier(p.tok.pos, p.tok.lit))
	p.read()

	// Optional display name: a string literal after the rule name. The
	// "i" (ignore case) suffix is only valid on matchers, not here.
	if p.tok.id == str || p.tok.id == rstr || p.tok.id == char {
		if strings.HasSuffix(p.tok.lit, "i") {
			p.errs.add(p.tok.pos, errors.New("invalid suffix 'i'"))
			return nil
		}
		s, err := strconv.Unquote(p.tok.lit)
		if err != nil {
			p.errs.add(p.tok.pos, err)
			return nil
		}
		r.DisplayName = ast.NewStringLit(p.tok.pos, s)
		p.read()
	}

	if !p.expect(ruledef) {
		return nil
	}
	p.read()
	p.skip(eol)

	expr := p.expression()
	if expr == nil {
		p.errs.add(p.tok.pos, errors.New("missing expression"))
		return nil
	}
	r.Expr = expr

	// A rule must end at a newline, a semicolon or the end of file.
	if !p.expect(eol, eof, semicolon) {
		p.errs.add(p.tok.pos, errors.New("rule not terminated"))
		return nil
	}
	return r
}
+
// expression parses: ChoiceExpr = ActionExpr { "/" ActionExpr } . A
// choice with a single alternative is unwrapped to that alternative.
func (p *Parser) expression() ast.Expression {
	defer p.out(p.in("expression"))

	choice := ast.NewChoiceExpr(p.tok.pos)
	for {
		expr := p.actionExpr()
		if expr != nil {
			choice.Alternatives = append(choice.Alternatives, expr)
		}
		if p.tok.id != slash {
			switch len(choice.Alternatives) {
			case 0:
				p.errs.add(p.tok.pos, errors.New("no expression in choice"))
				return nil
			case 1:
				return choice.Alternatives[0]
			default:
				return choice
			}
		}
		// move after the slash
		p.read()
	}
}
+
// actionExpr parses: ActionExpr = SeqExpr [ code ] . When there is no
// code block, the sequence expression is returned unwrapped.
func (p *Parser) actionExpr() ast.Expression {
	defer p.out(p.in("actionExpr"))

	act := ast.NewActionExpr(p.tok.pos)
	expr := p.seqExpr()
	if expr == nil {
		return nil
	}
	act.Expr = expr

	if p.tok.id == code {
		act.Code = ast.NewCodeBlock(p.tok.pos, p.tok.lit)
		p.read()
	}

	if act.Code == nil {
		return expr
	}
	return act
}
+
// seqExpr parses: SeqExpr = LabeledExpr { LabeledExpr } . A sequence
// of one expression is unwrapped to that expression.
func (p *Parser) seqExpr() ast.Expression {
	defer p.out(p.in("seqExpr"))

	seq := ast.NewSeqExpr(p.tok.pos)
	for {
		expr := p.labeledExpr()
		if expr == nil {
			// The sequence ends at the first token that cannot start
			// a labeled expression.
			switch len(seq.Exprs) {
			case 0:
				p.errs.add(p.tok.pos, errors.New("no expression in sequence"))
				return nil
			case 1:
				return seq.Exprs[0]
			default:
				return seq
			}
		}
		seq.Exprs = append(seq.Exprs, expr)
	}
}
+
// labeledExpr parses: LabeledExpr = [ ident colon ] PrefixedExpr .
// Without a label, the prefixed expression is returned unwrapped.
func (p *Parser) labeledExpr() ast.Expression {
	defer p.out(p.in("labeledExpr"))

	lab := ast.NewLabeledExpr(p.tok.pos)
	if p.tok.id == ident {
		// Only "ident :" introduces a label; a bare ident is a rule
		// reference, so one token of lookahead is needed to decide.
		peek := p.peek()
		if peek.id == colon {
			label := ast.NewIdentifier(p.tok.pos, p.tok.lit)
			lab.Label = label
			p.read()
			if !p.expect(colon) {
				return nil
			}
			p.read()
		}
	}

	expr := p.prefixedExpr()
	if expr == nil {
		// A label with nothing to label is an error; a plain missing
		// expression is reported by the caller.
		if lab.Label != nil {
			p.errs.add(p.tok.pos, errors.New("label without expression"))
		}
		return nil
	}

	if lab.Label != nil {
		lab.Expr = expr
		return lab
	}
	return expr
}
+
+func (p *Parser) prefixedExpr() ast.Expression {
+ defer p.out(p.in("prefixedExpr"))
+
+ var pref ast.Expression
+ switch p.tok.id {
+ case ampersand:
+ pref = ast.NewAndExpr(p.tok.pos)
+ p.read()
+ case exclamation:
+ pref = ast.NewNotExpr(p.tok.pos)
+ p.read()
+ }
+
+ expr := p.suffixedExpr()
+ if expr == nil {
+ if pref != nil {
+ p.errs.add(p.tok.pos, errors.New("prefix operator without expression"))
+ }
+ return nil
+ }
+ switch p := pref.(type) {
+ case *ast.AndExpr:
+ p.Expr = expr
+ return p
+ case *ast.NotExpr:
+ p.Expr = expr
+ return p
+ default:
+ return expr
+ }
+}
+
// suffixedExpr parses: SuffixedExpr = PrimaryExpr [ "?" | "*" | "+" ] .
func (p *Parser) suffixedExpr() ast.Expression {
	defer p.out(p.in("suffixedExpr"))

	expr := p.primaryExpr()
	if expr == nil {
		// A dangling suffix operator deserves its own error; any
		// other failure is reported by the caller.
		if p.tok.id == question || p.tok.id == star || p.tok.id == plus {
			p.errs.add(p.tok.pos, errors.New("suffix operator without expression"))
		}
		return nil
	}

	// Wrap the primary expression in the matching repetition node; the
	// node takes the primary expression's position, not the suffix's.
	switch p.tok.id {
	case question:
		q := ast.NewZeroOrOneExpr(expr.Pos())
		q.Expr = expr
		p.read()
		return q
	case star:
		s := ast.NewZeroOrMoreExpr(expr.Pos())
		s.Expr = expr
		p.read()
		return s
	case plus:
		l := ast.NewOneOrMoreExpr(expr.Pos())
		l.Expr = expr
		p.read()
		return l
	default:
		return expr
	}
}
+
// primaryExpr parses: PrimaryExpr = LiteralMatcher | CharClassMatcher |
// AnyMatcher | RuleRefExpr | lparen Expression rparen . It returns nil
// without recording an error when the current token cannot start a
// primary expression, leaving the diagnostic to the caller.
func (p *Parser) primaryExpr() ast.Expression {
	defer p.out(p.in("primaryExpr"))

	switch p.tok.id {
	case str, rstr, char:
		// literal matcher; an "i" suffix makes the match
		// case-insensitive and is stripped before unquoting
		ignore := strings.HasSuffix(p.tok.lit, "i")
		if ignore {
			p.tok.lit = p.tok.lit[:len(p.tok.lit)-1]
		}
		s, err := strconv.Unquote(p.tok.lit)
		if err != nil {
			p.errs.add(p.tok.pos, err)
		}
		lit := ast.NewLitMatcher(p.tok.pos, s)
		lit.IgnoreCase = ignore
		p.read()
		return lit

	case class:
		// character class matcher
		cl := ast.NewCharClassMatcher(p.tok.pos, p.tok.lit)
		p.read()
		return cl

	case dot:
		// any matcher
		any := ast.NewAnyMatcher(p.tok.pos, p.tok.lit)
		p.read()
		return any

	case ident:
		// rule reference expression
		return p.ruleRefExpr()

	case lparen:
		// expression in parenthesis
		p.read()
		expr := p.expression()
		if expr == nil {
			p.errs.add(p.tok.pos, errors.New("missing expression inside parenthesis"))
			return nil
		}
		if !p.expect(rparen) {
			return nil
		}
		p.read()
		return expr

	default:
		// if p.tok.id != eof && p.tok.id != eol && p.tok.id != semicolon {
		// 	p.errs.add(p.tok.pos, fmt.Errorf("invalid token %s (%q) for primary expression", p.tok.id, p.tok.lit))
		// }
		return nil
	}
}
+
+func (p *Parser) ruleRefExpr() ast.Expression {
+ defer p.out(p.in("ruleRefExpr"))
+
+ if !p.expect(ident) {
+ return nil
+ }
+ expr := ast.NewRuleRefExpr(p.tok.pos)
+ expr.Name = ast.NewIdentifier(p.tok.pos, p.tok.lit)
+ p.read()
+ return expr
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser_test.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser_test.go
new file mode 100644
index 0000000000..07b94db85e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/parser_test.go
@@ -0,0 +1,97 @@
+package bootstrap
+
+import (
+ "strings"
+ "testing"
+)
+
// parseValidCases holds grammar sources that must parse without error;
// the expected AST dump for each is in parseExpRes, index-aligned.
var parseValidCases = []string{
	"",
	"\n",
	"\n{code}",
	"\nR <- 'c'",
	"\n\nR <- 'c'\n\n",
	`
A = ident:B / C+ / D?;`,
	`{ code }

R "name" <- "abc"i
R2 = 'd'i
R3 = ( R2+ ![;] )`,
}
+
// parseExpRes holds the expected String() form of the AST produced for
// each entry of parseValidCases, index-aligned.
var parseExpRes = []string{
	`1:0 (0): *ast.Grammar{Init: , Rules: [
]}`,
	`2:0 (0): *ast.Grammar{Init: , Rules: [
]}`,
	`2:0 (0): *ast.Grammar{Init: 2:1 (1): *ast.CodeBlock{Val: "{code}"}, Rules: [
]}`,
	`2:0 (0): *ast.Grammar{Init: , Rules: [
2:1 (1): *ast.Rule{Name: 2:1 (1): *ast.Identifier{Val: "R"}, DisplayName: , Expr: 2:6 (6): *ast.LitMatcher{Val: "c", IgnoreCase: false}},
]}`,
	`2:0 (0): *ast.Grammar{Init: , Rules: [
3:1 (2): *ast.Rule{Name: 3:1 (2): *ast.Identifier{Val: "R"}, DisplayName: , Expr: 3:6 (7): *ast.LitMatcher{Val: "c", IgnoreCase: false}},
]}`,
	`2:0 (0): *ast.Grammar{Init: , Rules: [
2:1 (1): *ast.Rule{Name: 2:1 (1): *ast.Identifier{Val: "A"}, DisplayName: , Expr: 2:5 (5): *ast.ChoiceExpr{Alternatives: [
2:5 (5): *ast.LabeledExpr{Label: 2:5 (5): *ast.Identifier{Val: "ident"}, Expr: 2:11 (11): *ast.RuleRefExpr{Name: 2:11 (11): *ast.Identifier{Val: "B"}}},
2:15 (15): *ast.OneOrMoreExpr{Expr: 2:15 (15): *ast.RuleRefExpr{Name: 2:15 (15): *ast.Identifier{Val: "C"}}},
2:20 (20): *ast.ZeroOrOneExpr{Expr: 2:20 (20): *ast.RuleRefExpr{Name: 2:20 (20): *ast.Identifier{Val: "D"}}},
]}},
]}`,
	`1:1 (0): *ast.Grammar{Init: 1:1 (0): *ast.CodeBlock{Val: "{ code }"}, Rules: [
3:1 (10): *ast.Rule{Name: 3:1 (10): *ast.Identifier{Val: "R"}, DisplayName: 3:3 (12): *ast.StringLit{Val: "name"}, Expr: 3:13 (22): *ast.LitMatcher{Val: "abc", IgnoreCase: true}},
4:1 (29): *ast.Rule{Name: 4:1 (29): *ast.Identifier{Val: "R2"}, DisplayName: , Expr: 4:6 (34): *ast.LitMatcher{Val: "d", IgnoreCase: true}},
5:1 (39): *ast.Rule{Name: 5:1 (39): *ast.Identifier{Val: "R3"}, DisplayName: , Expr: 5:8 (46): *ast.SeqExpr{Exprs: [
5:8 (46): *ast.OneOrMoreExpr{Expr: 5:8 (46): *ast.RuleRefExpr{Name: 5:8 (46): *ast.Identifier{Val: "R2"}}},
5:12 (50): *ast.NotExpr{Expr: 5:13 (51): *ast.CharClassMatcher{Val: "[;]", IgnoreCase: false, Inverted: false}},
]}},
]}`,
}
+
// TestParseValid checks that every valid grammar parses and that its
// AST's string form matches the expected dump, index-aligned.
func TestParseValid(t *testing.T) {
	p := NewParser()
	for i, c := range parseValidCases {
		g, err := p.Parse("", strings.NewReader(c))
		if err != nil {
			t.Errorf("%d: got error %v", i, err)
			continue
		}

		want := parseExpRes[i]
		got := g.String()
		if want != got {
			t.Errorf("%d: want \n%s\n, got \n%s\n", i, want, got)
		}
	}
}
+
// parseInvalidCases holds grammar sources that must fail to parse; the
// expected error messages are in parseExpErrs, index-aligned.
var parseInvalidCases = []string{
	"a",
	`R = )`,
}

// parseExpErrs holds, per invalid case, every error message the parser
// is expected to record, in order.
var parseExpErrs = [][]string{
	{"1:1 (0): expected ruledef, got eof"},
	{"1:5 (4): no expression in sequence", "1:5 (4): no expression in choice", "1:5 (4): missing expression"},
}
+
// TestParseInvalid checks that each invalid grammar produces exactly
// the expected list of error messages, in order.
func TestParseInvalid(t *testing.T) {
	p := NewParser()
	for i, c := range parseInvalidCases {
		_, err := p.Parse("", strings.NewReader(c))
		el := *(err.(*errList))
		if len(el) != len(parseExpErrs[i]) {
			t.Errorf("%d: want %d errors, got %d", i, len(parseExpErrs[i]), len(el))
			continue
		}
		for j, err := range el {
			want := parseExpErrs[i][j]
			got := err.Error()
			if want != got {
				t.Errorf("%d: error %d: want %q, got %q", i, j, want, got)
			}
		}
	}
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/peg.ebnf b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/peg.ebnf
new file mode 100644
index 0000000000..1d9386d728
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/peg.ebnf
@@ -0,0 +1,25 @@
+// PEG grammar in EBNF form, to help implement the bootstrapping
+// parser. Terminals are tokens as defined in token.go and
+// returned by the Scanner implemented in scan.go.
+
+Grammar = [ code ] [ RuleList ] .
+RuleList = Rule { Rule } .
+Rule = ident [ str | rstr | char ] ruledef Expression ( eol | eof | semicolon ) .
+
+Expression = ChoiceExpr .
+ChoiceExpr = ActionExpr { "/" ActionExpr } .
+ActionExpr = SeqExpr [ code ] .
+SeqExpr = LabeledExpr { LabeledExpr } .
+LabeledExpr = [ ident colon ] PrefixedExpr .
+PrefixedExpr = [ PrefixedOp ] SuffixedExpr .
+PrefixedOp = ampersand | exclamation .
+SuffixedExpr = PrimaryExpr [ SuffixedOp ] .
+SuffixedOp = question | star | plus .
+PrimaryExpr = LiteralMatcher | CharClassMatcher | AnyMatcher | RuleRefExpr |
+ lparen Expression rparen .
+
+RuleRefExpr = ident .
+
+LiteralMatcher = [ str | rstr | char ] .
+CharClassMatcher = class .
+AnyMatcher = dot .
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan.go
new file mode 100644
index 0000000000..3f071bd23a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan.go
@@ -0,0 +1,559 @@
+package bootstrap
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "unicode"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
// Scanner tokenizes an input source for the PEG grammar.
type Scanner struct {
	r    io.RuneReader
	errh func(ast.Pos, error) // error handler; stderr is used when nil

	eof  bool    // true once the reader is exhausted
	cpos ast.Pos // position of cur in the input
	cur  rune    // current rune; -1 before the first read and at EOF
	cw   int     // width in bytes of cur

	tok bytes.Buffer // accumulates the literal of the token being scanned
}
+
+// Init initializes the scanner to read and tokenize text from r.
+func (s *Scanner) Init(filename string, r io.Reader, errh func(ast.Pos, error)) {
+ s.r = runeReader(r)
+ s.errh = errh
+
+ s.eof = false
+ s.cpos = ast.Pos{
+ Filename: filename,
+ Line: 1,
+ }
+
+ s.cur, s.cw = -1, 0
+ s.tok.Reset()
+}
+
// Scan returns the next token, along with a boolean indicating if EOF was
// reached (false means no more tokens).
func (s *Scanner) Scan() (Token, bool) {
	var tok Token

	if !s.eof && s.cur == -1 {
		// move to first rune
		s.read()
	}

	s.skipWhitespace()
	tok.pos = s.cpos

	// the first switch cases all position the scanner on the next rune
	// by their calls to scan*
	switch {
	case s.eof:
		tok.id = eof
	case isLetter(s.cur):
		tok.id = ident
		tok.lit = s.scanIdentifier()
		if _, ok := blacklistedIdents[tok.lit]; ok {
			s.errorpf(tok.pos, "illegal identifier %q", tok.lit)
		}
	case isRuleDefStart(s.cur):
		tok.id = ruledef
		tok.lit = s.scanRuleDef()
	case s.cur == '\'':
		tok.id = char
		tok.lit = s.scanChar()
	case s.cur == '"':
		tok.id = str
		tok.lit = s.scanString()
	case s.cur == '`':
		tok.id = rstr
		tok.lit = s.scanRawString()
	case s.cur == '[':
		tok.id = class
		tok.lit = s.scanClass()
	case s.cur == '{':
		tok.id = code
		tok.lit = s.scanCode()

	default:
		// Single-rune tokens and comments: consume the rune first.
		r := s.cur
		s.read()
		switch r {
		case '/':
			// "//" or "/*" starts a comment; a lone '/' falls through
			// to the single-rune choice separator below.
			if s.cur == '*' || s.cur == '/' {
				tok.id, tok.lit = s.scanComment()
				break
			}
			fallthrough
		case ':', ';', '(', ')', '.', '&', '!', '?', '+', '*', '\n':
			// A single-rune token's id is its rune value.
			tok.id = tid(r)
			tok.lit = string(r)
		default:
			s.errorf("invalid character %#U", r)
			tok.id = invalid
			tok.lit = string(r)
		}
	}

	return tok, tok.id != eof
}
+
+func (s *Scanner) scanIdentifier() string {
+ s.tok.Reset()
+ for isLetter(s.cur) || isDigit(s.cur) {
+ s.tok.WriteRune(s.cur)
+ s.read()
+ }
+ return s.tok.String()
+}
+
// scanComment scans a line (//) or multi-line (/*) comment and returns
// its token id and literal. The initial '/' was already consumed by
// the caller; s.cur is the rune after it ('*' or '/').
func (s *Scanner) scanComment() (tid, string) {
	s.tok.Reset()
	s.tok.WriteRune('/') // initial '/' already consumed

	var multiline bool
	switch s.cur {
	case '*':
		multiline = true
	case '\n', -1:
		// "//" immediately followed by newline/EOF: the second '/'
		// never arrived.
		s.errorf("comment not terminated")
		return lcomment, s.tok.String()
	}

	// closing tracks having just seen a '*' inside a /* comment, so a
	// following '/' ends it.
	var closing bool
	for {
		s.tok.WriteRune(s.cur)
		s.read()
		switch s.cur {
		case '\n':
			if !multiline {
				return lcomment, s.tok.String()
			}
		case -1:
			if multiline {
				s.errorf("comment not terminated")
				return mlcomment, s.tok.String()
			}
			return lcomment, s.tok.String()
		case '*':
			if multiline {
				closing = true
			}
		case '/':
			if closing {
				s.tok.WriteRune(s.cur)
				s.read()
				return mlcomment, s.tok.String()
			}
		}
	}
}
+
// scanCode scans a {...} code block, returned verbatim including the
// outer braces; nested braces are tracked with a depth counter.
// NOTE(review): braces inside string literals within the code block
// also count toward the depth — presumably accepted for the bootstrap
// grammar; confirm upstream.
func (s *Scanner) scanCode() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur)
	depth := 1
	for {
		s.read()
		s.tok.WriteRune(s.cur)
		switch s.cur {
		case -1:
			s.errorf("code block not terminated")
			return s.tok.String()
		case '{':
			depth++
		case '}':
			depth--
			if depth == 0 {
				s.read()
				return s.tok.String()
			}
		}
	}
}
+
// scanEscape scans an escape sequence inside a literal delimited by
// quote (quote ']' additionally enables \p Unicode class escapes). On
// a valid escape it returns true with s.cur on the escape's last rune
// (the caller advances next); on an invalid escape it returns false
// with the scanner already positioned on the next rune to examine.
func (s *Scanner) scanEscape(quote rune) bool {
	// scanEscape is always called as part of a greater token, so do not
	// reset s.tok, and write s.cur before calling s.read.
	s.tok.WriteRune(s.cur)

	// n/base/max describe the numeric escape forms (octal, \x, \u, \U).
	var n int
	var base, max uint32
	var unicodeClass bool

	s.read()
	switch s.cur {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
		s.tok.WriteRune(s.cur)
		return true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		n, base, max = 3, 8, 255
	case 'x':
		s.tok.WriteRune(s.cur)
		s.read()
		n, base, max = 2, 16, 255
	case 'u':
		s.tok.WriteRune(s.cur)
		s.read()
		n, base, max = 4, 16, unicode.MaxRune
	case 'U':
		s.tok.WriteRune(s.cur)
		s.read()
		n, base, max = 8, 16, unicode.MaxRune
	case 'p':
		// unicode character class, only valid if quote is ']'
		if quote == ']' {
			s.tok.WriteRune(s.cur)
			unicodeClass = true
			s.read()
			break
		}
		fallthrough
	default:
		s.tok.WriteRune(s.cur)
		msg := "unknown escape sequence"
		if s.cur == -1 || s.cur == '\n' {
			msg = "escape sequence not terminated"
			s.errorf(msg)
		} else {
			s.errorf(msg)
			s.read()
		}
		return false
	}

	if unicodeClass {
		switch s.cur {
		case '\n', -1:
			s.errorf("escape sequence not terminated")
			return false
		case '{':
			// unicode class name, read until '}'
			cnt := 0
			for {
				s.tok.WriteRune(s.cur)
				s.read()
				cnt++
				switch s.cur {
				case '\n', -1:
					s.errorf("escape sequence not terminated")
					return false
				case '}':
					if cnt < 2 {
						s.errorf("empty Unicode character class escape sequence")
					}
					s.tok.WriteRune(s.cur)
					return true
				}
			}
		default:
			// single letter class
			s.tok.WriteRune(s.cur)
			return true
		}
	}

	// Numeric escape: accumulate n digits in the given base and check
	// the resulting code point.
	var x uint32
	for n > 0 {
		s.tok.WriteRune(s.cur)
		d := uint32(digitVal(s.cur))
		if d >= base {
			msg := fmt.Sprintf("illegal character %#U in escape sequence", s.cur)
			if s.cur == -1 || s.cur == '\n' {
				msg = "escape sequence not terminated"
				s.errorf(msg)
				return false
			}
			s.errorf(msg)
			s.read()
			return false
		}
		x = x*base + d
		n--

		if n > 0 {
			s.read()
		}
	}

	// Reject values beyond the form's maximum and the surrogate range.
	if x > max || 0xd800 <= x && x <= 0xe000 {
		s.errorf("escape sequence is invalid Unicode code point")
		s.read()
		return false
	}
	return true
}
+
// scanClass scans a [...] character class token, including an optional
// trailing "i" (ignore case) suffix; s.cur is the opening '['.
func (s *Scanner) scanClass() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur) // opening '['

	// noread is set when scanEscape failed and already left s.cur on
	// the next rune to examine, so the loop must not read again.
	var noread bool
	for {
		if !noread {
			s.read()
		}
		noread = false
		switch s.cur {
		case '\\':
			noread = !s.scanEscape(']')
		case '\n', -1:
			// \n not consumed
			s.errorf("character class not terminated")
			return s.tok.String()
		case ']':
			s.tok.WriteRune(s.cur)
			s.read()
			// can have an optional "i" ignore case suffix
			if s.cur == 'i' {
				s.tok.WriteRune(s.cur)
				s.read()
			}
			return s.tok.String()
		default:
			s.tok.WriteRune(s.cur)
		}
	}
}
+
// scanRawString scans a `...` raw string literal, including an optional
// trailing "i" (ignore case) suffix; s.cur is the opening backquote.
// As in Go, carriage returns inside the literal are discarded.
func (s *Scanner) scanRawString() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur) // opening '`'

	var hasCR bool
loop:
	for {
		s.read()
		switch s.cur {
		case -1:
			s.errorf("raw string literal not terminated")
			break loop
		case '`':
			s.tok.WriteRune(s.cur)
			s.read()
			// can have an optional "i" ignore case suffix
			if s.cur == 'i' {
				s.tok.WriteRune(s.cur)
				s.read()
			}
			break loop
		case '\r':
			hasCR = true
			fallthrough
		default:
			s.tok.WriteRune(s.cur)
		}
	}

	// Strip CRs in one pass afterwards, only when any were seen.
	b := s.tok.Bytes()
	if hasCR {
		b = stripCR(b)
	}
	return string(b)
}
+
// stripCR returns a copy of b with every carriage return removed, as
// Go does for raw string literals. The input slice is not modified.
func stripCR(b []byte) []byte {
	out := make([]byte, 0, len(b))
	for _, ch := range b {
		if ch != '\r' {
			out = append(out, ch)
		}
	}
	return out
}
+
// scanString scans a "..." string literal, including an optional
// trailing "i" (ignore case) suffix; s.cur is the opening '"'.
func (s *Scanner) scanString() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur) // opening '"'

	// noread is set when scanEscape failed and already left s.cur on
	// the next rune to examine, so the loop must not read again.
	var noread bool
	for {
		if !noread {
			s.read()
		}
		noread = false
		switch s.cur {
		case '\\':
			noread = !s.scanEscape('"')
		case '\n', -1:
			// \n not consumed
			s.errorf("string literal not terminated")
			return s.tok.String()
		case '"':
			s.tok.WriteRune(s.cur)
			s.read()
			// can have an optional "i" ignore case suffix
			if s.cur == 'i' {
				s.tok.WriteRune(s.cur)
				s.read()
			}
			return s.tok.String()
		default:
			s.tok.WriteRune(s.cur)
		}
	}
}
+
// scanChar scans a '...' rune literal, including an optional trailing
// "i" (ignore case) suffix; s.cur is the opening quote. The whole
// literal is consumed even when it wrongly contains more than one rune,
// so scanning can continue past the error.
func (s *Scanner) scanChar() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur) // opening "'"

	// must be followed by one char (which may be an escape) and a single
	// quote, but read until we find that closing quote.
	cnt := 0
	var noread bool
	for {
		if !noread {
			s.read()
		}
		noread = false
		switch s.cur {
		case '\\':
			cnt++
			noread = !s.scanEscape('\'')
		case '\n', -1:
			// \n not consumed
			s.errorf("rune literal not terminated")
			return s.tok.String()
		case '\'':
			s.tok.WriteRune(s.cur)
			s.read()
			if cnt != 1 {
				s.errorf("rune literal is not a single rune")
			}
			// can have an optional "i" ignore case suffix
			if s.cur == 'i' {
				s.tok.WriteRune(s.cur)
				s.read()
			}
			return s.tok.String()
		default:
			cnt++
			s.tok.WriteRune(s.cur)
		}
	}
}
+
// scanRuleDef scans a rule-definition token: "=", "<-", or one of the
// single-rune arrows accepted by isRuleDefStart; s.cur is its first
// rune.
func (s *Scanner) scanRuleDef() string {
	s.tok.Reset()
	s.tok.WriteRune(s.cur)
	r := s.cur
	s.read()
	// "<" is only valid as the start of "<-".
	if r == '<' {
		if s.cur != -1 {
			s.tok.WriteRune(s.cur)
		}
		if s.cur != '-' {
			s.errorf("rule definition not terminated")
		}
		s.read()
	}

	return s.tok.String()
}
+
// read advances the Scanner to the next rune.
func (s *Scanner) read() {
	if s.eof {
		return
	}

	r, w, err := s.r.ReadRune()
	if err != nil {
		// Any read error — including io.EOF — ends the scan.
		s.fatalError(err)
		return
	}

	s.cur = r
	// The new rune starts where the previous one ended, so the offset
	// advances by the previous rune's byte width.
	s.cpos.Off += s.cw
	s.cw = w

	// newline is '\n' as in Go
	if r == '\n' {
		s.cpos.Line++
		s.cpos.Col = 0
	} else {
		s.cpos.Col++
	}
}
+
+// whitespace is the same as Go, except that it doesn't skip newlines,
+// those are returned as tokens.
+func (s *Scanner) skipWhitespace() {
+ for s.cur == ' ' || s.cur == '\t' || s.cur == '\r' {
+ s.read()
+ }
+}
+
// isRuleDefStart reports whether r can start a rule-definition token:
// "=", "<" (of "<-"), "←" (U+2190) or "⟵" (U+27F5).
func isRuleDefStart(r rune) bool {
	switch r {
	case '=', '<', '\u2190', '\u27f5':
		return true
	}
	return false
}
+
// isLetter has the same definition as Go: ASCII letters, underscore,
// and any non-ASCII Unicode letter.
func isLetter(r rune) bool {
	if ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') || r == '_' {
		return true
	}
	return r >= 0x80 && unicode.IsLetter(r)
}
+
// isDigit has the same definition as Go: ASCII digits and any
// non-ASCII Unicode digit.
func isDigit(r rune) bool {
	if '0' <= r && r <= '9' {
		return true
	}
	return r >= 0x80 && unicode.IsDigit(r)
}
+
// digitVal returns the numeric value of the hexadecimal digit r, or 16
// when r is not a valid digit (callers compare the result against
// their base, so 16 rejects it for every base).
func digitVal(r rune) int {
	switch {
	case r >= '0' && r <= '9':
		return int(r) - '0'
	case r >= 'a' && r <= 'f':
		return int(r) - 'a' + 10
	case r >= 'A' && r <= 'F':
		return int(r) - 'A' + 10
	default:
		return 16
	}
}
+
+// notify the handler of an error.
+func (s *Scanner) error(p ast.Pos, err error) {
+ if s.errh != nil {
+ s.errh(p, err)
+ return
+ }
+ fmt.Fprintf(os.Stderr, "%s: %v\n", p, err)
+}
+
// errorf reports a formatted error at the scanner's current position.
func (s *Scanner) errorf(f string, args ...interface{}) {
	s.errorpf(s.cpos, f, args...)
}
+
// errorpf reports a formatted error at the given position p.
func (s *Scanner) errorpf(p ast.Pos, f string, args ...interface{}) {
	s.error(p, fmt.Errorf(f, args...))
}
+
+// notify a non-recoverable error that terminates the scanning.
+func (s *Scanner) fatalError(err error) {
+ s.cur = -1
+ s.eof = true
+ if err != io.EOF {
+ s.error(s.cpos, err)
+ }
+}
+
+// convert the reader to a rune reader if required.
+func runeReader(r io.Reader) io.RuneReader {
+ if rr, ok := r.(io.RuneReader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan_test.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan_test.go
new file mode 100644
index 0000000000..d5c1af1a74
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/scan_test.go
@@ -0,0 +1,358 @@
+package bootstrap
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+var scanValidCases = []string{
+ "",
+ "a",
+ "ab",
+ "abc",
+ "_",
+ "_0",
+ "abc_012",
+ `=`,
+ `<-`,
+ "\u2190",
+ "\u27f5",
+ "' '",
+ "'*'",
+ "'a'",
+ "'a'i",
+ "'a'b",
+ `'\n'`,
+ `'\t'`,
+ `'\''`,
+ `'\\'`,
+ `'\xab'`,
+ `'\x1F'`,
+ `'\u1234'`,
+ `'\U000B1234'`,
+ `""`,
+ `"a"`,
+ `"a"i`,
+ `"a"b`,
+ `"a\b"`,
+ `"a\b \n 1"`,
+ `"\xAbc\u1234d\U000011FF"`,
+ "``",
+ "`a`",
+ "`a`i",
+ "`a`b",
+ "`a \\n `", // `a \n `
+ "`a \n `", // `a `
+ "`a \r\n `",
+ "[]",
+ "[[]",
+ "[[\\]]",
+ "[a]",
+ "[a]i",
+ "[a]b",
+ "[ab]",
+ "[a-b0-9]",
+ "[\\a]",
+ "[\\a\\pL_]",
+ "[\\a\\p{Greek}]",
+ "{}",
+ "{a}",
+ "{a}i",
+ "{\nif something {\n\tdoSomething()\n}\n}",
+ "// a",
+ "// a\nb",
+ "/a",
+ "/\n",
+ "/**/",
+ "/*a*/",
+ "/*a\nb*/",
+ ":",
+ ";",
+ "(",
+ ")",
+ ".",
+ "&",
+ "!",
+ "?",
+ "+",
+ "*",
+ "\n",
+ "pockage = a",
+ `Rule <-
+ E / ( 'a'? "bcd"i )+ / [efg-j]* { println() } // comment
+ / &'\xff' /* and
+some
+comment
+*/`,
+}
+
+var scanExpTokens = [][]string{
+ {"1:0 (0): eof \"\""},
+ {"1:1 (0): ident \"a\"", "1:1 (0): eof \"\""},
+ {"1:1 (0): ident \"ab\"", "1:2 (1): eof \"\""},
+ {"1:1 (0): ident \"abc\"", "1:3 (2): eof \"\""},
+ {"1:1 (0): ident \"_\"", "1:1 (0): eof \"\""},
+ {"1:1 (0): ident \"_0\"", "1:2 (1): eof \"\""},
+ {"1:1 (0): ident \"abc_012\"", "1:7 (6): eof \"\""},
+ {"1:1 (0): ruledef \"=\"", "1:1 (0): eof \"\""},
+ {"1:1 (0): ruledef \"<-\"", "1:2 (1): eof \"\""},
+ {"1:1 (0): ruledef \"\u2190\"", "1:1 (0): eof \"\""},
+ {"1:1 (0): ruledef \"\u27f5\"", "1:1 (0): eof \"\""},
+ {"1:1 (0): char \"' '\"", "1:3 (2): eof \"\""},
+ {"1:1 (0): char \"'*'\"", "1:3 (2): eof \"\""},
+ {"1:1 (0): char \"'a'\"", "1:3 (2): eof \"\""},
+ {"1:1 (0): char \"'a'i\"", "1:4 (3): eof \"\""},
+ {"1:1 (0): char \"'a'\"", "1:4 (3): ident \"b\"", "1:4 (3): eof \"\""},
+ {`1:1 (0): char "'\\n'"`, `1:4 (3): eof ""`},
+ {`1:1 (0): char "'\\t'"`, `1:4 (3): eof ""`},
+ {`1:1 (0): char "'\\''"`, `1:4 (3): eof ""`},
+ {`1:1 (0): char "'\\\\'"`, `1:4 (3): eof ""`},
+ {`1:1 (0): char "'\\xab'"`, `1:6 (5): eof ""`},
+ {`1:1 (0): char "'\\x1F'"`, `1:6 (5): eof ""`},
+ {`1:1 (0): char "'\\u1234'"`, `1:8 (7): eof ""`},
+ {`1:1 (0): char "'\\U000B1234'"`, `1:12 (11): eof ""`},
+ {`1:1 (0): str "\"\""`, `1:2 (1): eof ""`},
+ {`1:1 (0): str "\"a\""`, `1:3 (2): eof ""`},
+ {`1:1 (0): str "\"a\"i"`, `1:4 (3): eof ""`},
+ {`1:1 (0): str "\"a\""`, `1:4 (3): ident "b"`, `1:4 (3): eof ""`},
+ {`1:1 (0): str "\"a\\b\""`, `1:5 (4): eof ""`},
+ {`1:1 (0): str "\"a\\b \\n 1\""`, `1:10 (9): eof ""`},
+ {`1:1 (0): str "\"\\xAbc\\u1234d\\U000011FF\""`, `1:24 (23): eof ""`},
+ {"1:1 (0): rstr \"``\"", `1:2 (1): eof ""`},
+ {"1:1 (0): rstr \"`a`\"", `1:3 (2): eof ""`},
+ {"1:1 (0): rstr \"`a`i\"", `1:4 (3): eof ""`},
+ {"1:1 (0): rstr \"`a`\"", "1:4 (3): ident \"b\"", `1:4 (3): eof ""`},
+ {"1:1 (0): rstr \"`a \\\\n `\"", `1:7 (6): eof ""`},
+ {"1:1 (0): rstr \"`a \\n `\"", `2:2 (5): eof ""`},
+ {"1:1 (0): rstr \"`a \\n `\"", `2:2 (6): eof ""`},
+ {"1:1 (0): class \"[]\"", `1:2 (1): eof ""`},
+ {"1:1 (0): class \"[[]\"", `1:3 (2): eof ""`},
+ {"1:1 (0): class \"[[\\\\]]\"", `1:5 (4): eof ""`},
+ {"1:1 (0): class \"[a]\"", `1:3 (2): eof ""`},
+ {"1:1 (0): class \"[a]i\"", `1:4 (3): eof ""`},
+ {"1:1 (0): class \"[a]\"", `1:4 (3): ident "b"`, `1:4 (3): eof ""`},
+ {"1:1 (0): class \"[ab]\"", `1:4 (3): eof ""`},
+ {"1:1 (0): class \"[a-b0-9]\"", `1:8 (7): eof ""`},
+ {"1:1 (0): class \"[\\\\a]\"", `1:4 (3): eof ""`},
+ {"1:1 (0): class \"[\\\\a\\\\pL_]\"", `1:8 (7): eof ""`},
+ {"1:1 (0): class \"[\\\\a\\\\p{Greek}]\"", `1:13 (12): eof ""`},
+ {"1:1 (0): code \"{}\"", `1:2 (1): eof ""`},
+ {"1:1 (0): code \"{a}\"", `1:3 (2): eof ""`},
+ {"1:1 (0): code \"{a}\"", "1:4 (3): ident \"i\"", `1:4 (3): eof ""`},
+ {"1:1 (0): code \"{\\nif something {\\n\\tdoSomething()\\n}\\n}\"", `5:1 (34): eof ""`},
+ {"1:1 (0): lcomment \"// a\"", `1:4 (3): eof ""`},
+ {"1:1 (0): lcomment \"// a\"", `2:0 (4): eol "\n"`, `2:1 (5): ident "b"`, `2:1 (5): eof ""`},
+ {"1:1 (0): slash \"/\"", `1:2 (1): ident "a"`, `1:2 (1): eof ""`},
+ {"1:1 (0): slash \"/\"", `2:0 (1): eol "\n"`, `2:0 (1): eof ""`},
+ {"1:1 (0): mlcomment \"/**/\"", `1:4 (3): eof ""`},
+ {"1:1 (0): mlcomment \"/*a*/\"", `1:5 (4): eof ""`},
+ {"1:1 (0): mlcomment \"/*a\\nb*/\"", `2:3 (6): eof ""`},
+ {"1:1 (0): colon \":\"", `1:1 (0): eof ""`},
+ {"1:1 (0): semicolon \";\"", `1:1 (0): eof ""`},
+ {"1:1 (0): lparen \"(\"", `1:1 (0): eof ""`},
+ {"1:1 (0): rparen \")\"", `1:1 (0): eof ""`},
+ {"1:1 (0): dot \".\"", `1:1 (0): eof ""`},
+ {"1:1 (0): ampersand \"&\"", `1:1 (0): eof ""`},
+ {"1:1 (0): exclamation \"!\"", `1:1 (0): eof ""`},
+ {"1:1 (0): question \"?\"", `1:1 (0): eof ""`},
+ {"1:1 (0): plus \"+\"", `1:1 (0): eof ""`},
+ {"1:1 (0): star \"*\"", `1:1 (0): eof ""`},
+ {"2:0 (0): eol \"\\n\"", `2:0 (0): eof ""`},
+ {"1:1 (0): ident \"pockage\"", `1:9 (8): ruledef "="`, `1:11 (10): ident "a"`, `1:11 (10): eof ""`},
+ {
+ `1:1 (0): ident "Rule"`,
+ `1:6 (5): ruledef "<-"`,
+ `2:0 (7): eol "\n"`,
+ `2:2 (9): ident "E"`,
+ `2:4 (11): slash "/"`,
+ `2:6 (13): lparen "("`,
+ `2:8 (15): char "'a'"`,
+ `2:11 (18): question "?"`,
+ `2:13 (20): str "\"bcd\"i"`,
+ `2:20 (27): rparen ")"`,
+ `2:21 (28): plus "+"`,
+ `2:23 (30): slash "/"`,
+ `2:25 (32): class "[efg-j]"`,
+ `2:32 (39): star "*"`,
+ `2:34 (41): code "{ println() }"`,
+ `2:48 (55): lcomment "// comment"`,
+ `3:0 (65): eol "\n"`,
+ `3:2 (67): slash "/"`,
+ `3:4 (69): ampersand "&"`,
+ `3:5 (70): char "'\\xff'"`,
+ `3:12 (77): mlcomment "/* and\nsome\ncomment\n*/"`,
+ `6:2 (98): eof ""`,
+ },
+}
+
+type errsink struct {
+ errs []error
+ pos []ast.Pos
+}
+
+func (e *errsink) add(p ast.Pos, err error) {
+ e.errs = append(e.errs, err)
+ e.pos = append(e.pos, p)
+}
+
+func (e *errsink) reset() {
+ e.errs = e.errs[:0]
+ e.pos = e.pos[:0]
+}
+
+func (e *errsink) StringAt(i int) string {
+ if i < 0 || i >= len(e.errs) {
+ return ""
+ }
+ return fmt.Sprintf("%s: %s", e.pos[i], e.errs[i])
+}
+
+func TestScanValid(t *testing.T) {
+ old := tokenStringLen
+ tokenStringLen = 100
+ defer func() { tokenStringLen = old }()
+
+ var s Scanner
+ var errh errsink
+ for i, c := range scanValidCases {
+ errh.reset()
+ s.Init("", strings.NewReader(c), errh.add)
+
+ j := 0
+ for {
+ tok, ok := s.Scan()
+ if j < len(scanExpTokens[i]) {
+ got := tok.String()
+ want := scanExpTokens[i][j]
+ if got != want {
+ t.Errorf("%d: token %d: want %q, got %q", i, j, want, got)
+ }
+ } else {
+ t.Errorf("%d: want %d tokens, got #%d", i, len(scanExpTokens[i]), j+1)
+ }
+ if !ok {
+ if j < len(scanExpTokens[i])-1 {
+				t.Errorf("%d: want %d tokens, got only %d", i, len(scanExpTokens[i]), j+1)
+ }
+ break
+ }
+ j++
+ }
+ if len(errh.errs) != 0 {
+ t.Errorf("%d: want no error, got %d", i, len(errh.errs))
+ t.Log(errh.errs)
+ }
+ }
+}
+
+var scanInvalidCases = []string{
+ "|",
+ "<",
+ "'",
+ "''",
+ "'ab'",
+ `'\xff\U00001234'`,
+ `'\pA'`,
+ `'\z'`,
+ "'\\\n",
+ `'\xg'`,
+ `'\129'`,
+ `'\12`,
+ `'\xa`,
+ `'\u123z'`,
+ `'\u12`,
+ `'\UFFFFffff'`,
+ `'\uD800'`,
+ `'\ue000'`,
+ `'\ud901'`,
+ `'\"'`,
+ "\"\n",
+ "\"",
+ "\"\\'\"",
+ "`",
+ "[",
+ "[\\\"",
+ `[\[]`,
+ `[\p]`,
+ `[\p{]`,
+ `[\p{`,
+ `[\p{}]`,
+ `{code{}`,
+ `/*a*`,
+ `/*a`,
+ `func`,
+}
+
+var scanExpErrs = [][]string{
+ {"1:1 (0): invalid character U+007C '|'"},
+ {"1:1 (0): rule definition not terminated"},
+ {"1:1 (0): rune literal not terminated"},
+ {"1:2 (1): rune literal is not a single rune"},
+ {"1:4 (3): rune literal is not a single rune"},
+ {"1:16 (15): rune literal is not a single rune"},
+ {"1:3 (2): unknown escape sequence",
+ "1:5 (4): rune literal is not a single rune"},
+ {"1:3 (2): unknown escape sequence"},
+ {"2:0 (2): escape sequence not terminated",
+ "2:0 (2): rune literal not terminated"},
+ {"1:4 (3): illegal character U+0067 'g' in escape sequence"},
+ {"1:5 (4): illegal character U+0039 '9' in escape sequence"},
+ {"1:4 (3): escape sequence not terminated",
+ "1:4 (3): rune literal not terminated"},
+ {"1:4 (3): escape sequence not terminated",
+ "1:4 (3): rune literal not terminated"},
+ {"1:7 (6): illegal character U+007A 'z' in escape sequence"},
+ {"1:5 (4): escape sequence not terminated",
+ "1:5 (4): rune literal not terminated"},
+ {"1:11 (10): escape sequence is invalid Unicode code point"},
+ {"1:7 (6): escape sequence is invalid Unicode code point"},
+ {"1:7 (6): escape sequence is invalid Unicode code point"},
+ {"1:7 (6): escape sequence is invalid Unicode code point"},
+ {"1:3 (2): unknown escape sequence"},
+ {"2:0 (1): string literal not terminated"},
+ {"1:1 (0): string literal not terminated"},
+ {"1:3 (2): unknown escape sequence"},
+ {"1:1 (0): raw string literal not terminated"},
+ {"1:1 (0): character class not terminated"},
+ {"1:3 (2): unknown escape sequence",
+ "1:3 (2): character class not terminated"},
+ {"1:3 (2): unknown escape sequence"},
+ {"1:4 (3): character class not terminated"},
+ {"1:5 (4): escape sequence not terminated",
+ "1:5 (4): character class not terminated"},
+ {"1:4 (3): escape sequence not terminated",
+ "1:4 (3): character class not terminated"},
+ {"1:5 (4): empty Unicode character class escape sequence"},
+ {"1:7 (6): code block not terminated"},
+ {"1:4 (3): comment not terminated"},
+ {"1:3 (2): comment not terminated"},
+ {"1:1 (0): illegal identifier \"func\""},
+}
+
+func TestScanInvalid(t *testing.T) {
+ var s Scanner
+ var errh errsink
+ for i, c := range scanInvalidCases {
+ errh.reset()
+ s.Init("", strings.NewReader(c), errh.add)
+ for {
+ if _, ok := s.Scan(); !ok {
+ break
+ }
+ }
+ if len(errh.errs) != len(scanExpErrs[i]) {
+ t.Errorf("%d: want %d errors, got %d", i, len(scanExpErrs[i]), len(errh.errs))
+ continue
+ }
+ for j := range errh.errs {
+ want := scanExpErrs[i][j]
+ got := errh.StringAt(j)
+ if want != got {
+ t.Errorf("%d: error %d: want %q, got %q", i, j, want, got)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/bootstrap/token.go b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/token.go
new file mode 100644
index 0000000000..982d5181ce
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/bootstrap/token.go
@@ -0,0 +1,161 @@
+package bootstrap
+
+import (
+ "fmt"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+type tid int
+
+const (
+ invalid tid = iota - 1
+ eof // end-of-file token, id 0
+
+ ident tid = iota + 127 // identifiers follow the same rules as Go
+ ruledef // rule definition token
+
+ // literals
+ char // character literal, as in Go ('a'i?)
+ str // double-quoted string literal, as in Go ("string"i?)
+ rstr // back-tick quoted raw string literal, as in Go (`string`i?)
+ class // square-brackets character classes ([a\n\t]i?)
+ lcomment // line comment as in Go (// comment or /* comment */ with no newline)
+ mlcomment // multi-line comment as in Go (/* comment */)
+ code // code blocks between '{' and '}'
+
+ // operators and delimiters have the value of their char
+ // smallest value in that category is 10, for '\n'
+ eol tid = '\n' // end-of-line token, required in the parser
+ colon tid = ':' // separate variable name from expression ':'
+ semicolon tid = ';' // optional ';' to terminate rules
+ lparen tid = '(' // parenthesis to group expressions '('
+ rparen tid = ')' // ')'
+ dot tid = '.' // any matcher '.'
+ ampersand tid = '&' // and-predicate '&'
+ exclamation tid = '!' // not-predicate '!'
+ question tid = '?' // zero-or-one '?'
+ plus tid = '+' // one-or-more '+'
+ star tid = '*' // zero-or-more '*'
+ slash tid = '/' // ordered choice '/'
+)
+
+var lookup = map[tid]string{
+ invalid: "invalid",
+ eof: "eof",
+ ident: "ident",
+ ruledef: "ruledef",
+ char: "char",
+ str: "str",
+ rstr: "rstr",
+ class: "class",
+ lcomment: "lcomment",
+ mlcomment: "mlcomment",
+ code: "code",
+ eol: "eol",
+ colon: "colon",
+ semicolon: "semicolon",
+ lparen: "lparen",
+ rparen: "rparen",
+ dot: "dot",
+ ampersand: "ampersand",
+ exclamation: "exclamation",
+ question: "question",
+ plus: "plus",
+ star: "star",
+ slash: "slash",
+}
+
+func (t tid) String() string {
+ if s, ok := lookup[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("tid(%d)", t)
+}
+
+var blacklistedIdents = map[string]struct{}{
+ // Go keywords http://golang.org/ref/spec#Keywords
+ "break": struct{}{},
+ "case": struct{}{},
+ "chan": struct{}{},
+ "const": struct{}{},
+ "continue": struct{}{},
+ "default": struct{}{},
+ "defer": struct{}{},
+ "else": struct{}{},
+ "fallthrough": struct{}{},
+ "for": struct{}{},
+ "func": struct{}{},
+ "go": struct{}{},
+ "goto": struct{}{},
+ "if": struct{}{},
+ "import": struct{}{},
+ "interface": struct{}{},
+ "map": struct{}{},
+ "package": struct{}{},
+ "range": struct{}{},
+ "return": struct{}{},
+ "select": struct{}{},
+ "struct": struct{}{},
+ "switch": struct{}{},
+ "type": struct{}{},
+ "var": struct{}{},
+
+ // predeclared identifiers http://golang.org/ref/spec#Predeclared_identifiers
+ "bool": struct{}{},
+ "byte": struct{}{},
+ "complex64": struct{}{},
+ "complex128": struct{}{},
+ "error": struct{}{},
+ "float32": struct{}{},
+ "float64": struct{}{},
+ "int": struct{}{},
+ "int8": struct{}{},
+ "int16": struct{}{},
+ "int32": struct{}{},
+ "int64": struct{}{},
+ "rune": struct{}{},
+ "string": struct{}{},
+ "uint": struct{}{},
+ "uint8": struct{}{},
+ "uint16": struct{}{},
+ "uint32": struct{}{},
+ "uint64": struct{}{},
+ "uintptr": struct{}{},
+ "true": struct{}{},
+ "false": struct{}{},
+ "iota": struct{}{},
+ "nil": struct{}{},
+ "append": struct{}{},
+ "cap": struct{}{},
+ "close": struct{}{},
+ "complex": struct{}{},
+ "copy": struct{}{},
+ "delete": struct{}{},
+ "imag": struct{}{},
+ "len": struct{}{},
+ "make": struct{}{},
+ "new": struct{}{},
+ "panic": struct{}{},
+ "print": struct{}{},
+ "println": struct{}{},
+ "real": struct{}{},
+ "recover": struct{}{},
+}
+
+// Token is a syntactic token generated by the scanner.
+type Token struct {
+ id tid
+ lit string
+ pos ast.Pos
+}
+
+var tokenStringLen = 50
+
+func (t Token) String() string {
+ v := t.lit
+ if len(v) > tokenStringLen {
+ v = v[:tokenStringLen/2] + "[...]" + v[len(v)-(tokenStringLen/2):len(v)]
+ }
+ return fmt.Sprintf("%s: %s %q", t.pos, t.id, v)
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/builder/builder.go b/vendor/github.com/PuerkitoBio/pigeon/builder/builder.go
new file mode 100644
index 0000000000..4fccadbc4d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/builder/builder.go
@@ -0,0 +1,573 @@
+// Package builder generates the parser code for a given grammar. It makes
+// no attempt to verify the correctness of the grammar.
+package builder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+// generated function templates
+var (
+ onFuncTemplate = `func (%s *current) %s(%s) (interface{}, error) {
+%s
+}
+`
+ onPredFuncTemplate = `func (%s *current) %s(%s) (bool, error) {
+%s
+}
+`
+ callFuncTemplate = `func (p *parser) call%s() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.%[1]s(%s)
+}
+`
+ callPredFuncTemplate = `func (p *parser) call%s() (bool, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.%[1]s(%s)
+}
+`
+)
+
+// Option is a function that can set an option on the builder. It returns
+// the previous setting as an Option.
+type Option func(*builder) Option
+
+// ReceiverName returns an option that specifies the receiver name to
+// use for the current struct (which is the struct on which all code blocks
+// except the initializer are generated).
+func ReceiverName(nm string) Option {
+ return func(b *builder) Option {
+ prev := b.recvName
+ b.recvName = nm
+ return ReceiverName(prev)
+ }
+}
+
+// BuildParser builds the PEG parser using the provided grammar. The code is
+// written to the specified w.
+func BuildParser(w io.Writer, g *ast.Grammar, opts ...Option) error {
+ b := &builder{w: w, recvName: "c"}
+ b.setOptions(opts)
+ return b.buildParser(g)
+}
+
+type builder struct {
+ w io.Writer
+ err error
+
+ // options
+ recvName string
+
+ ruleName string
+ exprIndex int
+ argsStack [][]string
+}
+
+func (b *builder) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(b)
+ }
+}
+
+func (b *builder) buildParser(g *ast.Grammar) error {
+ b.writeInit(g.Init)
+ b.writeGrammar(g)
+
+ for _, rule := range g.Rules {
+ b.writeRuleCode(rule)
+ }
+ b.writeStaticCode()
+
+ return b.err
+}
+
+func (b *builder) writeInit(init *ast.CodeBlock) {
+ if init == nil {
+ return
+ }
+
+ // remove opening and closing braces
+ val := init.Val[1 : len(init.Val)-1]
+ b.writelnf("%s", val)
+}
+
+func (b *builder) writeGrammar(g *ast.Grammar) {
+ // transform the ast grammar to the self-contained, no dependency version
+ // of the parser-generator grammar.
+ b.writelnf("var g = &grammar {")
+ b.writelnf("\trules: []*rule{")
+ for _, r := range g.Rules {
+ b.writeRule(r)
+ }
+ b.writelnf("\t},")
+ b.writelnf("}")
+}
+
+func (b *builder) writeRule(r *ast.Rule) {
+ if r == nil || r.Name == nil {
+ return
+ }
+
+ b.exprIndex = 0
+ b.ruleName = r.Name.Val
+
+ b.writelnf("{")
+ b.writelnf("\tname: %q,", r.Name.Val)
+ if r.DisplayName != nil && r.DisplayName.Val != "" {
+ b.writelnf("\tdisplayName: %q,", r.DisplayName.Val)
+ }
+ pos := r.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(r.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeExpr(expr ast.Expression) {
+ b.exprIndex++
+ switch expr := expr.(type) {
+ case *ast.ActionExpr:
+ b.writeActionExpr(expr)
+ case *ast.AndCodeExpr:
+ b.writeAndCodeExpr(expr)
+ case *ast.AndExpr:
+ b.writeAndExpr(expr)
+ case *ast.AnyMatcher:
+ b.writeAnyMatcher(expr)
+ case *ast.CharClassMatcher:
+ b.writeCharClassMatcher(expr)
+ case *ast.ChoiceExpr:
+ b.writeChoiceExpr(expr)
+ case *ast.LabeledExpr:
+ b.writeLabeledExpr(expr)
+ case *ast.LitMatcher:
+ b.writeLitMatcher(expr)
+ case *ast.NotCodeExpr:
+ b.writeNotCodeExpr(expr)
+ case *ast.NotExpr:
+ b.writeNotExpr(expr)
+ case *ast.OneOrMoreExpr:
+ b.writeOneOrMoreExpr(expr)
+ case *ast.RuleRefExpr:
+ b.writeRuleRefExpr(expr)
+ case *ast.SeqExpr:
+ b.writeSeqExpr(expr)
+ case *ast.ZeroOrMoreExpr:
+ b.writeZeroOrMoreExpr(expr)
+ case *ast.ZeroOrOneExpr:
+ b.writeZeroOrOneExpr(expr)
+ default:
+ b.err = fmt.Errorf("builder: unknown expression type %T", expr)
+ }
+}
+
+func (b *builder) writeActionExpr(act *ast.ActionExpr) {
+ if act == nil {
+ b.writelnf("nil,")
+ return
+ }
+ act.FuncIx = b.exprIndex
+ b.writelnf("&actionExpr{")
+ pos := act.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writelnf("\trun: (*parser).call%s,", b.funcName(act.FuncIx))
+ b.writef("\texpr: ")
+ b.writeExpr(act.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeAndCodeExpr(and *ast.AndCodeExpr) {
+ if and == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&andCodeExpr{")
+ pos := and.Pos()
+ and.FuncIx = b.exprIndex
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writelnf("\trun: (*parser).call%s,", b.funcName(and.FuncIx))
+ b.writelnf("},")
+}
+
+func (b *builder) writeAndExpr(and *ast.AndExpr) {
+ if and == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&andExpr{")
+ pos := and.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(and.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeAnyMatcher(any *ast.AnyMatcher) {
+ if any == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&anyMatcher{")
+ pos := any.Pos()
+ b.writelnf("\tline: %d, col: %d, offset: %d,", pos.Line, pos.Col, pos.Off)
+ b.writelnf("},")
+}
+
+func (b *builder) writeCharClassMatcher(ch *ast.CharClassMatcher) {
+ if ch == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&charClassMatcher{")
+ pos := ch.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writelnf("\tval: %q,", ch.Val)
+ if len(ch.Chars) > 0 {
+ b.writef("\tchars: []rune{")
+ for _, rn := range ch.Chars {
+ if ch.IgnoreCase {
+ b.writef("%q,", unicode.ToLower(rn))
+ } else {
+ b.writef("%q,", rn)
+ }
+ }
+ b.writelnf("},")
+ }
+ if len(ch.Ranges) > 0 {
+ b.writef("\tranges: []rune{")
+ for _, rn := range ch.Ranges {
+ if ch.IgnoreCase {
+ b.writef("%q,", unicode.ToLower(rn))
+ } else {
+ b.writef("%q,", rn)
+ }
+ }
+ b.writelnf("},")
+ }
+ if len(ch.UnicodeClasses) > 0 {
+ b.writef("\tclasses: []*unicode.RangeTable{")
+ for _, cl := range ch.UnicodeClasses {
+ b.writef("rangeTable(%q),", cl)
+ }
+ b.writelnf("},")
+ }
+ b.writelnf("\tignoreCase: %t,", ch.IgnoreCase)
+ b.writelnf("\tinverted: %t,", ch.Inverted)
+ b.writelnf("},")
+}
+
+func (b *builder) writeChoiceExpr(ch *ast.ChoiceExpr) {
+ if ch == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&choiceExpr{")
+ pos := ch.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ if len(ch.Alternatives) > 0 {
+ b.writelnf("\talternatives: []interface{}{")
+ for _, alt := range ch.Alternatives {
+ b.writeExpr(alt)
+ }
+ b.writelnf("\t},")
+ }
+ b.writelnf("},")
+}
+
+func (b *builder) writeLabeledExpr(lab *ast.LabeledExpr) {
+ if lab == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&labeledExpr{")
+ pos := lab.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ if lab.Label != nil && lab.Label.Val != "" {
+ b.writelnf("\tlabel: %q,", lab.Label.Val)
+ }
+ b.writef("\texpr: ")
+ b.writeExpr(lab.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeLitMatcher(lit *ast.LitMatcher) {
+ if lit == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&litMatcher{")
+ pos := lit.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ if lit.IgnoreCase {
+ b.writelnf("\tval: %q,", strings.ToLower(lit.Val))
+ } else {
+ b.writelnf("\tval: %q,", lit.Val)
+ }
+ b.writelnf("\tignoreCase: %t,", lit.IgnoreCase)
+ b.writelnf("},")
+}
+
+func (b *builder) writeNotCodeExpr(not *ast.NotCodeExpr) {
+ if not == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("¬CodeExpr{")
+ pos := not.Pos()
+ not.FuncIx = b.exprIndex
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writelnf("\trun: (*parser).call%s,", b.funcName(not.FuncIx))
+ b.writelnf("},")
+}
+
+func (b *builder) writeNotExpr(not *ast.NotExpr) {
+ if not == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("¬Expr{")
+ pos := not.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(not.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeOneOrMoreExpr(one *ast.OneOrMoreExpr) {
+ if one == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&oneOrMoreExpr{")
+ pos := one.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(one.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeRuleRefExpr(ref *ast.RuleRefExpr) {
+ if ref == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&ruleRefExpr{")
+ pos := ref.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ if ref.Name != nil && ref.Name.Val != "" {
+ b.writelnf("\tname: %q,", ref.Name.Val)
+ }
+ b.writelnf("},")
+}
+
+func (b *builder) writeSeqExpr(seq *ast.SeqExpr) {
+ if seq == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&seqExpr{")
+ pos := seq.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ if len(seq.Exprs) > 0 {
+ b.writelnf("\texprs: []interface{}{")
+ for _, e := range seq.Exprs {
+ b.writeExpr(e)
+ }
+ b.writelnf("\t},")
+ }
+ b.writelnf("},")
+}
+
+func (b *builder) writeZeroOrMoreExpr(zero *ast.ZeroOrMoreExpr) {
+ if zero == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&zeroOrMoreExpr{")
+ pos := zero.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(zero.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeZeroOrOneExpr(zero *ast.ZeroOrOneExpr) {
+ if zero == nil {
+ b.writelnf("nil,")
+ return
+ }
+ b.writelnf("&zeroOrOneExpr{")
+ pos := zero.Pos()
+ b.writelnf("\tpos: position{line: %d, col: %d, offset: %d},", pos.Line, pos.Col, pos.Off)
+ b.writef("\texpr: ")
+ b.writeExpr(zero.Expr)
+ b.writelnf("},")
+}
+
+func (b *builder) writeRuleCode(rule *ast.Rule) {
+ if rule == nil || rule.Name == nil {
+ return
+ }
+
+	// keep track of the current rule, as the code blocks are created
+ // in functions named "on<#ExprIndex>".
+ b.ruleName = rule.Name.Val
+ b.pushArgsSet()
+ b.writeExprCode(rule.Expr)
+ b.popArgsSet()
+}
+
+func (b *builder) pushArgsSet() {
+ b.argsStack = append(b.argsStack, nil)
+}
+
+func (b *builder) popArgsSet() {
+ b.argsStack = b.argsStack[:len(b.argsStack)-1]
+}
+
+func (b *builder) addArg(arg *ast.Identifier) {
+ if arg == nil {
+ return
+ }
+ ix := len(b.argsStack) - 1
+ b.argsStack[ix] = append(b.argsStack[ix], arg.Val)
+}
+
+func (b *builder) writeExprCode(expr ast.Expression) {
+ switch expr := expr.(type) {
+ case *ast.ActionExpr:
+ b.writeExprCode(expr.Expr)
+ b.writeActionExprCode(expr)
+
+ case *ast.AndCodeExpr:
+ b.writeAndCodeExprCode(expr)
+
+ case *ast.LabeledExpr:
+ b.addArg(expr.Label)
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+
+ case *ast.NotCodeExpr:
+ b.writeNotCodeExprCode(expr)
+
+ case *ast.AndExpr:
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+ case *ast.ChoiceExpr:
+ for _, alt := range expr.Alternatives {
+ b.pushArgsSet()
+ b.writeExprCode(alt)
+ b.popArgsSet()
+ }
+ case *ast.NotExpr:
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+ case *ast.OneOrMoreExpr:
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+ case *ast.SeqExpr:
+ for _, sub := range expr.Exprs {
+ b.writeExprCode(sub)
+ }
+ case *ast.ZeroOrMoreExpr:
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+ case *ast.ZeroOrOneExpr:
+ b.pushArgsSet()
+ b.writeExprCode(expr.Expr)
+ b.popArgsSet()
+ }
+}
+
+func (b *builder) writeActionExprCode(act *ast.ActionExpr) {
+ if act == nil {
+ return
+ }
+ b.writeFunc(act.FuncIx, act.Code, callFuncTemplate, onFuncTemplate)
+}
+
+func (b *builder) writeAndCodeExprCode(and *ast.AndCodeExpr) {
+ if and == nil {
+ return
+ }
+ b.writeFunc(and.FuncIx, and.Code, callPredFuncTemplate, onPredFuncTemplate)
+}
+
+func (b *builder) writeNotCodeExprCode(not *ast.NotCodeExpr) {
+ if not == nil {
+ return
+ }
+ b.writeFunc(not.FuncIx, not.Code, callPredFuncTemplate, onPredFuncTemplate)
+}
+
+func (b *builder) writeFunc(funcIx int, code *ast.CodeBlock, callTpl, funcTpl string) {
+ if code == nil {
+ return
+ }
+ val := strings.TrimSpace(code.Val)[1 : len(code.Val)-1]
+ if len(val) > 0 && val[0] == '\n' {
+ val = val[1:]
+ }
+ if len(val) > 0 && val[len(val)-1] == '\n' {
+ val = val[:len(val)-1]
+ }
+ var args bytes.Buffer
+ ix := len(b.argsStack) - 1
+ if ix >= 0 {
+ for i, arg := range b.argsStack[ix] {
+ if i > 0 {
+ args.WriteString(", ")
+ }
+ args.WriteString(arg)
+ }
+ }
+ if args.Len() > 0 {
+ args.WriteString(" interface{}")
+ }
+
+ fnNm := b.funcName(funcIx)
+ b.writelnf(funcTpl, b.recvName, fnNm, args.String(), val)
+
+ args.Reset()
+ if ix >= 0 {
+ for i, arg := range b.argsStack[ix] {
+ if i > 0 {
+ args.WriteString(", ")
+ }
+ args.WriteString(fmt.Sprintf(`stack[%q]`, arg))
+ }
+ }
+ b.writelnf(callTpl, fnNm, args.String())
+}
+
+func (b *builder) writeStaticCode() {
+ b.writelnf(staticCode)
+}
+
+func (b *builder) funcName(ix int) string {
+ return "on" + b.ruleName + strconv.Itoa(ix)
+}
+
+func (b *builder) writef(f string, args ...interface{}) {
+ if b.err == nil {
+ _, b.err = fmt.Fprintf(b.w, f, args...)
+ }
+}
+
+func (b *builder) writelnf(f string, args ...interface{}) {
+ b.writef(f+"\n", args...)
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/builder/builder_test.go b/vendor/github.com/PuerkitoBio/pigeon/builder/builder_test.go
new file mode 100644
index 0000000000..c119886af1
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/builder/builder_test.go
@@ -0,0 +1,40 @@
+package builder
+
+import (
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ "github.com/PuerkitoBio/pigeon/bootstrap"
+)
+
+var grammar = `
+{
+var test = "some string"
+
+func init() {
+ fmt.Println("this is inside the init")
+}
+}
+
+start = additive eof
+additive = left:multiplicative "+" space right:additive {
+ fmt.Println(left, right)
+} / mul:multiplicative { fmt.Println(mul) }
+multiplicative = left:primary op:"*" space right:multiplicative { fmt.Println(left, right, op) } / primary
+primary = integer / "(" space additive:additive ")" space { fmt.Println(additive) }
+integer "integer" = digits:[0123456789]+ space { fmt.Println(digits) }
+space = ' '*
+eof = !. { fmt.Println("eof") }
+`
+
+func TestBuildParser(t *testing.T) {
+ p := bootstrap.NewParser()
+ g, err := p.Parse("", strings.NewReader(grammar))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := BuildParser(ioutil.Discard, g); err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/builder/static_code.go b/vendor/github.com/PuerkitoBio/pigeon/builder/static_code.go
new file mode 100644
index 0000000000..d81b592204
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/builder/static_code.go
@@ -0,0 +1,867 @@
+package builder
+
+var staticCode = `
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+func (p position) String() string {
+ return fmt.Sprintf("%%d:%%d [%%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune
+ w int
+}
+
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
+// the AST types...
+
+type grammar struct {
+ pos position
+ rules []*rule
+}
+
+type rule struct {
+ pos position
+ name string
+ displayName string
+ expr interface{}
+}
+
+type choiceExpr struct {
+ pos position
+ alternatives []interface{}
+}
+
+type actionExpr struct {
+ pos position
+ expr interface{}
+ run func(*parser) (interface{}, error)
+}
+
+type seqExpr struct {
+ pos position
+ exprs []interface{}
+}
+
+type labeledExpr struct {
+ pos position
+ label string
+ expr interface{}
+}
+
+type expr struct {
+ pos position
+ expr interface{}
+}
+
+type andExpr expr
+type notExpr expr
+type zeroOrOneExpr expr
+type zeroOrMoreExpr expr
+type oneOrMoreExpr expr
+
+type ruleRefExpr struct {
+ pos position
+ name string
+}
+
+type andCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type notCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type litMatcher struct {
+ pos position
+ val string
+ ignoreCase bool
+}
+
+type charClassMatcher struct {
+ pos position
+ val string
+ chars []rune
+ ranges []rune
+ classes []*unicode.RangeTable
+ ignoreCase bool
+ inverted bool
+}
+
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+func (e *errList) add(err error) {
+ *e = append(*e, err)
+}
+
+func (e errList) err() error {
+ if len(e) == 0 {
+ return nil
+ }
+ e.dedupe()
+ return e
+}
+
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+ Inner error
+ pos position
+ prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+ return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+func newParser(filename string, b []byte, opts ...Option) *parser {
+ p := &parser{
+ filename: filename,
+ errs: new(errList),
+ data: b,
+ pt: savepoint{position: position{line: 1}},
+ recover: true,
+ }
+ p.setOptions(opts)
+ return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
+type resultTuple struct {
+ v interface{}
+ b bool
+ end savepoint
+}
+
+type parser struct {
+ filename string
+ pt savepoint
+ cur current
+
+ data []byte
+ errs *errList
+
+ recover bool
+ debug bool
+ depth int
+
+ memoize bool
+ // memoization table for the packrat algorithm:
+ // map[offset in source] map[expression or rule] {value, match}
+ memo map[int]map[interface{}]resultTuple
+
+ // rules table, maps the rule identifier to the rule node
+ rules map[string]*rule
+ // variables stack, map of label to value
+ vstack []map[string]interface{}
+ // rule stack, allows identification of the current rule in errors
+ rstack []*rule
+
+ // stats
+ exprCnt int
+}
+
+// push a variable set on the vstack.
+func (p *parser) pushV() {
+ if cap(p.vstack) == len(p.vstack) {
+ // create new empty slot in the stack
+ p.vstack = append(p.vstack, nil)
+ } else {
+ // slice to 1 more
+ p.vstack = p.vstack[:len(p.vstack)+1]
+ }
+
+ // get the last args set
+ m := p.vstack[len(p.vstack)-1]
+ if m != nil && len(m) == 0 {
+ // empty map, all good
+ return
+ }
+
+ m = make(map[string]interface{})
+ p.vstack[len(p.vstack)-1] = m
+}
+
+// pop a variable set from the vstack.
+func (p *parser) popV() {
+ // if the map is not empty, clear it
+ m := p.vstack[len(p.vstack)-1]
+ if len(m) > 0 {
+ // GC that map
+ p.vstack[len(p.vstack)-1] = nil
+ }
+ p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+func (p *parser) print(prefix, s string) string {
+ if !p.debug {
+ return s
+ }
+
+ fmt.Printf("%%s %%d:%%d:%%d: %%s [%%#U]\n",
+ prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+ return s
+}
+
+func (p *parser) in(s string) string {
+ p.depth++
+ return p.print(strings.Repeat(" ", p.depth) + ">", s)
+}
+
+func (p *parser) out(s string) string {
+ p.depth--
+ return p.print(strings.Repeat(" ", p.depth) + "<", s)
+}
+
+func (p *parser) addErr(err error) {
+ p.addErrAt(err, p.pt.position)
+}
+
+func (p *parser) addErrAt(err error, pos position) {
+ var buf bytes.Buffer
+ if p.filename != "" {
+ buf.WriteString(p.filename)
+ }
+ if buf.Len() > 0 {
+ buf.WriteString(":")
+ }
+ buf.WriteString(fmt.Sprintf("%%d:%%d (%%d)", pos.line, pos.col, pos.offset))
+ if len(p.rstack) > 0 {
+ if buf.Len() > 0 {
+ buf.WriteString(": ")
+ }
+ rule := p.rstack[len(p.rstack)-1]
+ if rule.displayName != "" {
+ buf.WriteString("rule " + rule.displayName)
+ } else {
+ buf.WriteString("rule " + rule.name)
+ }
+ }
+ pe := &parserError{Inner: err, prefix: buf.String()}
+ p.errs.add(pe)
+}
+
+// read advances the parser to the next rune.
+func (p *parser) read() {
+ p.pt.offset += p.pt.w
+ rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+ p.pt.rn = rn
+ p.pt.w = n
+ p.pt.col++
+ if rn == '\n' {
+ p.pt.line++
+ p.pt.col = 0
+ }
+
+ if rn == utf8.RuneError {
+ if n > 0 {
+ p.addErr(errInvalidEncoding)
+ }
+ }
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+ if p.debug {
+ defer p.out(p.in("restore"))
+ }
+ if pt.offset == p.pt.offset {
+ return
+ }
+ p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+ return p.data[start.position.offset:p.pt.position.offset]
+}
+
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+ if p.memo == nil {
+ p.memo = make(map[int]map[interface{}]resultTuple)
+ }
+ m := p.memo[pt.offset]
+ if m == nil {
+ m = make(map[interface{}]resultTuple)
+ p.memo[pt.offset] = m
+ }
+ m[node] = tuple
+}
+
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+ if len(g.rules) == 0 {
+ p.addErr(errNoRule)
+ return nil, p.errs.err()
+ }
+
+ // TODO : not super critical but this could be generated
+ p.buildRulesTable(g)
+
+ if p.recover {
+ // panic can be used in action code to stop parsing immediately
+ // and return the panic as an error.
+ defer func() {
+ if e := recover(); e != nil {
+ if p.debug {
+ defer p.out(p.in("panic handler"))
+ }
+ val = nil
+ switch e := e.(type) {
+ case error:
+ p.addErr(e)
+ default:
+ p.addErr(fmt.Errorf("%%v", e))
+ }
+ err = p.errs.err()
+ }
+ }()
+ }
+
+ // start rule is rule [0]
+ p.read() // advance to first rune
+ val, ok := p.parseRule(g.rules[0])
+ if !ok {
+ if len(*p.errs) == 0 {
+ // make sure this doesn't go out silently
+ p.addErr(errNoMatch)
+ }
+ return nil, p.errs.err()
+ }
+ return val, p.errs.err()
+}
+
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRule " + rule.name))
+ }
+
+ if p.memoize {
+ res, ok := p.getMemoized(rule)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ }
+
+ start := p.pt
+ p.rstack = append(p.rstack, rule)
+ p.pushV()
+ val, ok := p.parseExpr(rule.expr)
+ p.popV()
+ p.rstack = p.rstack[:len(p.rstack)-1]
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth) + "MATCH", string(p.sliceFrom(start)))
+ }
+
+ if p.memoize {
+ p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+ var pt savepoint
+ var ok bool
+
+ if p.memoize {
+ res, ok := p.getMemoized(expr)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ pt = p.pt
+ }
+
+ p.exprCnt++
+ var val interface{}
+ switch expr := expr.(type) {
+ case *actionExpr:
+ val, ok = p.parseActionExpr(expr)
+ case *andCodeExpr:
+ val, ok = p.parseAndCodeExpr(expr)
+ case *andExpr:
+ val, ok = p.parseAndExpr(expr)
+ case *anyMatcher:
+ val, ok = p.parseAnyMatcher(expr)
+ case *charClassMatcher:
+ val, ok = p.parseCharClassMatcher(expr)
+ case *choiceExpr:
+ val, ok = p.parseChoiceExpr(expr)
+ case *labeledExpr:
+ val, ok = p.parseLabeledExpr(expr)
+ case *litMatcher:
+ val, ok = p.parseLitMatcher(expr)
+ case *notCodeExpr:
+ val, ok = p.parseNotCodeExpr(expr)
+ case *notExpr:
+ val, ok = p.parseNotExpr(expr)
+ case *oneOrMoreExpr:
+ val, ok = p.parseOneOrMoreExpr(expr)
+ case *ruleRefExpr:
+ val, ok = p.parseRuleRefExpr(expr)
+ case *seqExpr:
+ val, ok = p.parseSeqExpr(expr)
+ case *zeroOrMoreExpr:
+ val, ok = p.parseZeroOrMoreExpr(expr)
+ case *zeroOrOneExpr:
+ val, ok = p.parseZeroOrOneExpr(expr)
+ default:
+ panic(fmt.Sprintf("unknown expression type %%T", expr))
+ }
+ if p.memoize {
+ p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseActionExpr"))
+ }
+
+ start := p.pt
+ val, ok := p.parseExpr(act.expr)
+ if ok {
+ p.cur.pos = start.position
+ p.cur.text = p.sliceFrom(start)
+ actVal, err := act.run(p)
+ if err != nil {
+ p.addErrAt(err, start.position)
+ }
+ val = actVal
+ }
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth) + "MATCH", string(p.sliceFrom(start)))
+ }
+ return val, ok
+}
+
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndCodeExpr"))
+ }
+
+ ok, err := and.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, ok
+}
+
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(and.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, ok
+}
+
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseCharClassMatcher"))
+ }
+
+ cur := p.pt.rn
+ // can't match EOF
+ if cur == utf8.RuneError {
+ return nil, false
+ }
+ start := p.pt
+ if chr.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+
+ // try to match in the list of available chars
+ for _, rn := range chr.chars {
+ if rn == cur {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of ranges
+ for i := 0; i < len(chr.ranges); i += 2 {
+ if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of Unicode classes
+ for _, cl := range chr.classes {
+ if unicode.Is(cl, cur) {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ if chr.inverted {
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLabeledExpr"))
+ }
+
+ p.pushV()
+ val, ok := p.parseExpr(lab.expr)
+ p.popV()
+ if ok && lab.label != "" {
+ m := p.vstack[len(p.vstack)-1]
+ m[lab.label] = val
+ }
+ return val, ok
+}
+
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLitMatcher"))
+ }
+
+ start := p.pt
+ for _, want := range lit.val {
+ cur := p.pt.rn
+ if lit.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+ if cur != want {
+ p.restore(start)
+ return nil, false
+ }
+ p.read()
+ }
+ return p.sliceFrom(start), true
+}
+
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotCodeExpr"))
+ }
+
+ ok, err := not.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, !ok
+}
+
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(not.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, !ok
+}
+
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseOneOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ if len(vals) == 0 {
+ // did not match once, no match
+ return nil, false
+ }
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRuleRefExpr " + ref.name))
+ }
+
+ if ref.name == "" {
+ panic(fmt.Sprintf("%%s: invalid rule: missing name", ref.pos))
+ }
+
+ rule := p.rules[ref.name]
+ if rule == nil {
+ p.addErr(fmt.Errorf("undefined rule: %%s", ref.name))
+ return nil, false
+ }
+ return p.parseRule(rule)
+}
+
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseSeqExpr"))
+ }
+
+ var vals []interface{}
+
+ pt := p.pt
+ for _, expr := range seq.exprs {
+ val, ok := p.parseExpr(expr)
+ if !ok {
+ p.restore(pt)
+ return nil, false
+ }
+ vals = append(vals, val)
+ }
+ return vals, true
+}
+
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %%s", class))
+}
+`
diff --git a/vendor/github.com/PuerkitoBio/pigeon/cmpast_test.go b/vendor/github.com/PuerkitoBio/pigeon/cmpast_test.go
new file mode 100644
index 0000000000..d381a4ca8e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/cmpast_test.go
@@ -0,0 +1,304 @@
+package main
+
+import (
+ "strconv"
+ "testing"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+// compareGrammars reports whether got matches exp, logging the first
+// difference found via t.Errorf. src identifies the grammar source in the
+// failure messages.
+func compareGrammars(t *testing.T, src string, exp, got *ast.Grammar) bool {
+	expInit, gotInit := exp.Init != nil, got.Init != nil
+	if expInit != gotInit {
+		t.Errorf("%q: want Init? %t, got %t", src, exp.Init != nil, got.Init != nil)
+		return false
+	}
+	if expInit && exp.Init.Val != got.Init.Val {
+		t.Errorf("%q: want Init %q, got %q", src, exp.Init.Val, got.Init.Val)
+		return false
+	}
+
+	if rn, rm := len(exp.Rules), len(got.Rules); rn != rm {
+		t.Errorf("%q: want %d rules, got %d", src, rn, rm)
+		return false
+	}
+
+	for i := range got.Rules {
+		want := exp.Rules[i]
+		if !compareRule(t, src+": "+want.Name.Val, want, got.Rules[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// compareRule reports whether got matches exp — name, optional display
+// name, and the full expression tree — logging the first difference via
+// t.Errorf.
+func compareRule(t *testing.T, prefix string, exp, got *ast.Rule) bool {
+	if exp.Name.Val != got.Name.Val {
+		t.Errorf("%q: want rule name %q, got %q", prefix, exp.Name.Val, got.Name.Val)
+		return false
+	}
+	expDN, gotDN := exp.DisplayName != nil, got.DisplayName != nil
+	if expDN != gotDN {
+		t.Errorf("%q: want DisplayName? %t, got %t", prefix, exp.DisplayName != nil, got.DisplayName != nil)
+		return false
+	}
+	if expDN && exp.DisplayName.Val != got.DisplayName.Val {
+		t.Errorf("%q: want DisplayName %q, got %q", prefix, exp.DisplayName.Val, got.DisplayName.Val)
+		return false
+	}
+	return compareExpr(t, prefix, 0, exp.Expr, got.Expr)
+}
+
+// compareExpr reports whether got matches exp, comparing the concrete node
+// type and every relevant field, and recursing into child expressions with
+// an incremented depth ix (shown as " (ix)" in failure messages). Cases
+// that carry no child expression fall through to the final "return true";
+// differences are reported via t.Errorf before returning false.
+func compareExpr(t *testing.T, prefix string, ix int, exp, got ast.Expression) bool {
+	ixPrefix := prefix + " (" + strconv.Itoa(ix) + ")"
+
+	switch exp := exp.(type) {
+	case *ast.ActionExpr:
+		got, ok := got.(*ast.ActionExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if (exp.Code != nil) != (got.Code != nil) {
+			t.Errorf("%q: want Code?: %t, got %t", ixPrefix, exp.Code != nil, got.Code != nil)
+			return false
+		}
+		if exp.Code != nil {
+			if exp.Code.Val != got.Code.Val {
+				t.Errorf("%q: want code %q, got %q", ixPrefix, exp.Code.Val, got.Code.Val)
+				return false
+			}
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.AndCodeExpr:
+		got, ok := got.(*ast.AndCodeExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if (exp.Code != nil) != (got.Code != nil) {
+			t.Errorf("%q: want Code?: %t, got %t", ixPrefix, exp.Code != nil, got.Code != nil)
+			return false
+		}
+		if exp.Code != nil {
+			if exp.Code.Val != got.Code.Val {
+				t.Errorf("%q: want code %q, got %q", ixPrefix, exp.Code.Val, got.Code.Val)
+				return false
+			}
+		}
+
+	case *ast.AndExpr:
+		got, ok := got.(*ast.AndExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.AnyMatcher:
+		got, ok := got.(*ast.AnyMatcher)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		// for completion's sake...
+		// NOTE: a Val mismatch here is logged but deliberately does not
+		// short-circuit; the function still falls through to return true.
+		if exp.Val != got.Val {
+			t.Errorf("%q: want value %q, got %q", ixPrefix, exp.Val, got.Val)
+		}
+
+	case *ast.CharClassMatcher:
+		got, ok := got.(*ast.CharClassMatcher)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if exp.IgnoreCase != got.IgnoreCase {
+			t.Errorf("%q: want IgnoreCase %t, got %t", ixPrefix, exp.IgnoreCase, got.IgnoreCase)
+			return false
+		}
+		if exp.Inverted != got.Inverted {
+			t.Errorf("%q: want Inverted %t, got %t", ixPrefix, exp.Inverted, got.Inverted)
+			return false
+		}
+
+		// Chars, Ranges and UnicodeClasses are each compared by length
+		// first, then element by element.
+		ne, ng := len(exp.Chars), len(got.Chars)
+		if ne != ng {
+			t.Errorf("%q: want %d Chars, got %d (%v)", ixPrefix, ne, ng, got.Chars)
+			return false
+		}
+		for i, r := range exp.Chars {
+			if r != got.Chars[i] {
+				t.Errorf("%q: want Chars[%d] %#U, got %#U", ixPrefix, i, r, got.Chars[i])
+				return false
+			}
+		}
+
+		ne, ng = len(exp.Ranges), len(got.Ranges)
+		if ne != ng {
+			t.Errorf("%q: want %d Ranges, got %d", ixPrefix, ne, ng)
+			return false
+		}
+		for i, r := range exp.Ranges {
+			if r != got.Ranges[i] {
+				t.Errorf("%q: want Ranges[%d] %#U, got %#U", ixPrefix, i, r, got.Ranges[i])
+				return false
+			}
+		}
+
+		ne, ng = len(exp.UnicodeClasses), len(got.UnicodeClasses)
+		if ne != ng {
+			t.Errorf("%q: want %d UnicodeClasses, got %d", ixPrefix, ne, ng)
+			return false
+		}
+		for i, s := range exp.UnicodeClasses {
+			if s != got.UnicodeClasses[i] {
+				t.Errorf("%q: want UnicodeClasses[%d] %q, got %q", ixPrefix, i, s, got.UnicodeClasses[i])
+				return false
+			}
+		}
+
+	case *ast.ChoiceExpr:
+		got, ok := got.(*ast.ChoiceExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		ne, ng := len(exp.Alternatives), len(got.Alternatives)
+		if ne != ng {
+			t.Errorf("%q: want %d Alternatives, got %d", ixPrefix, ne, ng)
+			return false
+		}
+
+		for i, alt := range exp.Alternatives {
+			if !compareExpr(t, prefix, ix+1, alt, got.Alternatives[i]) {
+				return false
+			}
+		}
+
+	case *ast.LabeledExpr:
+		got, ok := got.(*ast.LabeledExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if (exp.Label != nil) != (got.Label != nil) {
+			t.Errorf("%q: want Label?: %t, got %t", ixPrefix, exp.Label != nil, got.Label != nil)
+			return false
+		}
+		if exp.Label != nil {
+			if exp.Label.Val != got.Label.Val {
+				t.Errorf("%q: want label %q, got %q", ixPrefix, exp.Label.Val, got.Label.Val)
+				return false
+			}
+		}
+
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.LitMatcher:
+		got, ok := got.(*ast.LitMatcher)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if exp.IgnoreCase != got.IgnoreCase {
+			t.Errorf("%q: want IgnoreCase %t, got %t", ixPrefix, exp.IgnoreCase, got.IgnoreCase)
+			return false
+		}
+		if exp.Val != got.Val {
+			t.Errorf("%q: want value %q, got %q", ixPrefix, exp.Val, got.Val)
+			return false
+		}
+
+	case *ast.NotCodeExpr:
+		got, ok := got.(*ast.NotCodeExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if (exp.Code != nil) != (got.Code != nil) {
+			t.Errorf("%q: want Code?: %t, got %t", ixPrefix, exp.Code != nil, got.Code != nil)
+			return false
+		}
+		if exp.Code != nil {
+			if exp.Code.Val != got.Code.Val {
+				t.Errorf("%q: want code %q, got %q", ixPrefix, exp.Code.Val, got.Code.Val)
+				return false
+			}
+		}
+
+	case *ast.NotExpr:
+		got, ok := got.(*ast.NotExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.OneOrMoreExpr:
+		got, ok := got.(*ast.OneOrMoreExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.RuleRefExpr:
+		got, ok := got.(*ast.RuleRefExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		if (exp.Name != nil) != (got.Name != nil) {
+			t.Errorf("%q: want Name?: %t, got %t", ixPrefix, exp.Name != nil, got.Name != nil)
+			return false
+		}
+		if exp.Name != nil {
+			if exp.Name.Val != got.Name.Val {
+				t.Errorf("%q: want name %q, got %q", ixPrefix, exp.Name.Val, got.Name.Val)
+				return false
+			}
+		}
+
+	case *ast.SeqExpr:
+		got, ok := got.(*ast.SeqExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		ne, ng := len(exp.Exprs), len(got.Exprs)
+		if ne != ng {
+			t.Errorf("%q: want %d Exprs, got %d", ixPrefix, ne, ng)
+			return false
+		}
+
+		for i, expr := range exp.Exprs {
+			if !compareExpr(t, prefix, ix+1, expr, got.Exprs[i]) {
+				return false
+			}
+		}
+
+	case *ast.ZeroOrMoreExpr:
+		got, ok := got.(*ast.ZeroOrMoreExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	case *ast.ZeroOrOneExpr:
+		got, ok := got.(*ast.ZeroOrOneExpr)
+		if !ok {
+			t.Errorf("%q: want expression type %T, got %T", ixPrefix, exp, got)
+			return false
+		}
+		return compareExpr(t, prefix, ix+1, exp.Expr, got.Expr)
+
+	default:
+		t.Fatalf("unexpected expression type %T", exp)
+	}
+	return true
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/doc.go b/vendor/github.com/PuerkitoBio/pigeon/doc.go
new file mode 100644
index 0000000000..448acbd0f4
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/doc.go
@@ -0,0 +1,438 @@
+/*
+Command pigeon generates parsers in Go from a PEG grammar.
+
+From Wikipedia [0]:
+
+ A parsing expression grammar is a type of analytic formal grammar, i.e.
+ it describes a formal language in terms of a set of rules for recognizing
+ strings in the language.
+
+Its features and syntax are inspired by the PEG.js project [1], while
+the implementation is loosely based on [2]. Formal presentation of the
+PEG theory by Bryan Ford is also an important reference [3]. An introductory
+blog post can be found at [4].
+
+ [0]: http://en.wikipedia.org/wiki/Parsing_expression_grammar
+ [1]: http://pegjs.org/
+ [2]: http://www.codeproject.com/Articles/29713/Parsing-Expression-Grammar-Support-for-C-Part
+ [3]: http://pdos.csail.mit.edu/~baford/packrat/popl04/peg-popl04.pdf
+ [4]: http://0value.com/A-PEG-parser-generator-for-Go
+
+Command-line usage
+
+The pigeon tool must be called with PEG input as defined
+by the accepted PEG syntax below. The grammar may be provided by a
+file or read from stdin. The generated parser is written to stdout
+by default.
+
+ pigeon [options] [GRAMMAR_FILE]
+
+The following options can be specified:
+
+ -cache : cache parser results to avoid exponential parsing time in
+ pathological cases. Can make the parsing slower for typical
+ cases and uses more memory (default: false).
+
+ -debug : boolean, print debugging info to stdout (default: false).
+
+ -no-recover : boolean, if set, do not recover from a panic. Useful
+ to access the panic stack when debugging, otherwise the panic
+ is converted to an error (default: false).
+
+ -o=FILE : string, output file where the generated parser will be
+ written (default: stdout).
+
+ -x : boolean, if set, do not build the parser, just parse the input grammar
+ (default: false).
+
+ -receiver-name=NAME : string, name of the receiver variable for the generated
+ code blocks. Non-initializer code blocks in the grammar end up as methods on the
+ *current type, and this option sets the name of the receiver (default: c).
+
+The tool makes no attempt to format the code, nor to detect the
+required imports. It is recommended to use goimports to properly generate
+the output code:
+ pigeon GRAMMAR_FILE | goimports > output_file.go
+
+The goimports tool can be installed with:
+ go get golang.org/x/tools/cmd/goimports
+
+If the code blocks in the grammar (see below, section "Code block") are golint-
+and go vet-compliant, then the resulting generated code will also be golint-
+and go vet-compliant.
+
+The generated code doesn't use any third-party dependency unless code blocks
+in the grammar require such a dependency.
+
+PEG syntax
+
+The accepted syntax for the grammar is formally defined in the
+grammar/pigeon.peg file, using the PEG syntax. What follows is an informal
+description of this syntax.
+
+Identifiers, whitespace, comments and literals follow the same
+notation as the Go language, as defined in the language specification
+(http://golang.org/ref/spec#Source_code_representation):
+
+ // single line comment*/
+// /* multi-line comment */
+/* 'x' (single quotes for single char literal)
+ "double quotes for string literal"
+ `backtick quotes for raw string literal`
+ RuleName (a valid identifier)
+
+The grammar must be Unicode text encoded in UTF-8. New lines are identified
+by the \n character (U+000A). Space (U+0020), horizontal tabs (U+0009) and
+carriage returns (U+000D) are considered whitespace and are ignored except
+to separate tokens.
+
+Rules
+
+A PEG grammar consists of a set of rules. A rule is an identifier followed
+by a rule definition operator and an expression. An optional display name -
+a string literal used in error messages instead of the rule identifier - can
+be specified after the rule identifier. E.g.:
+ RuleA "friendly name" = 'a'+ // RuleA is one or more lowercase 'a's
+
+The rule definition operator can be any one of those:
+ =, <-, ← (U+2190), ⟵ (U+27F5)
+
+Expressions
+
+A rule is defined by an expression. The following sections describe the
+various expression types. Expressions can be grouped by using parentheses,
+and a rule can be referenced by its identifier in place of an expression.
+
+Choice expression
+
+The choice expression is a list of expressions that will be tested in the
+order they are defined. The first one that matches will be used. Expressions
+are separated by the forward slash character "/". E.g.:
+ ChoiceExpr = A / B / C // A, B and C should be rules declared in the grammar
+
+Because the first match is used, it is important to think about the order
+of expressions. For example, in this rule, "<=" would never be used because
+the "<" expression comes first:
+ BadChoiceExpr = "<" / "<="
+
+Sequence expression
+
+The sequence expression is a list of expressions that must all match in
+that same order for the sequence expression to be considered a match.
+Expressions are separated by whitespace. E.g.:
+ SeqExpr = "A" "b" "c" // matches "Abc", but not "Acb"
+
+Labeled expression
+
+A labeled expression consists of an identifier followed by a colon ":"
+and an expression. A labeled expression introduces a variable named with
+the label that can be referenced in the code blocks in the same scope.
+The variable will have the value of the expression that follows the colon.
+E.g.:
+ LabeledExpr = value:[a-z]+ {
+ fmt.Println(value)
+ return value, nil
+ }
+
+The variable is typed as an empty interface, and the underlying type depends
+on the following:
+
+For terminals (character and string literals, character classes and
+the any matcher), the value is []byte. E.g.:
+ Rule = label:'a' { // label is []byte }
+
+For predicates (& and !), the value is always nil. E.g.:
+ Rule = label:&'a' { // label is nil }
+
+For a sequence, the value is a slice of empty interfaces, one for each
+expression value in the sequence. The underlying types of each value
+in the slice follow the same rules described here, recursively. E.g.:
+ Rule = label:('a' 'b') { // label is []interface{} }
+
+For a repetition (+ and *), the value is a slice of empty interfaces, one for
+each repetition. The underlying types of each value in the slice follow
+the same rules described here, recursively. E.g.:
+ Rule = label:[a-z]+ { // label is []interface{} }
+
+For a choice expression, the value is that of the matching choice. E.g.:
+ Rule = label:('a' / 'b') { // label is []byte }
+
+For the optional expression (?), the value is nil or the value of the
+expression. E.g.:
+ Rule = label:'a'? { // label is nil or []byte }
+
+Of course, the type of the value can be anything once an action code block
+is used. E.g.:
+ RuleA = label:'3' {
+ return 3, nil
+ }
+ RuleB = label:RuleA { // label is int }
+
+And and not expressions
+
+An expression prefixed with the ampersand "&" is the "and" predicate
+expression: it is considered a match if the following expression is a match,
+but it does not consume any input.
+
+An expression prefixed with the exclamation point "!" is the "not" predicate
+expression: it is considered a match if the following expression is not
+a match, but it does not consume any input. E.g.:
+ AndExpr = "A" &"B" // matches "A" if followed by a "B" (does not consume "B")
+ NotExpr = "A" !"B" // matches "A" if not followed by a "B" (does not consume "B")
+
+The expression following the & and ! operators can be a code block. In that
+case, the code block must return a bool and an error. The operator's semantic
+is the same, & is a match if the code block returns true, ! is a match if the
+code block returns false. The code block has access to any labeled value
+defined in its scope. E.g.:
+ CodeAndExpr = value:[a-z] &{
+ // can access the value local variable...
+ return true, nil
+ }
+
+Repeating expressions
+
+An expression followed by "*", "?" or "+" is a match if the expression
+occurs zero or more times ("*"), zero or one time ("?") or one or more times
+("+") respectively. The match is greedy, it will match as many times as
+possible. E.g.
+ ZeroOrMoreAs = "A"*
+
+Literal matcher
+
+A literal matcher tries to match the input against a single character or a
+string literal. The literal may be a single-quoted single character, a
+double-quoted string or a backtick-quoted raw string. The same rules as in Go
+apply regarding the allowed characters and escapes.
+
+The literal may be followed by a lowercase "i" (outside the ending quote)
+to indicate that the match is case-insensitive. E.g.:
+ LiteralMatch = "Awesome\n"i // matches "awesome" followed by a newline
+
+Character class matcher
+
+A character class matcher tries to match the input against a class of characters
+inside square brackets "[...]". Inside the brackets, characters represent
+themselves and the same escapes as in string literals are available, except
+that the single- and double-quote escape is not valid, instead the closing
+square bracket "]" must be escaped to be used.
+
+Character ranges can be specified using the "[a-z]" notation. Unicode
+classes can be specified using the "[\pL]" notation, where L is a
+single-letter Unicode class of characters, or using the "[\p{Class}]"
+notation where Class is a valid Unicode class (e.g. "Latin").
+
+As for string literals, a lowercase "i" may follow the matcher (outside
+the ending square bracket) to indicate that the match is case-insensitive.
+A "^" as first character inside the square brackets indicates that the match
+is inverted (it is a match if the input does not match the character class
+matcher). E.g.:
+ NotAZ = [^a-z]i
+
+Any matcher
+
+The any matcher is represented by the dot ".". It matches any character
+except the end of file, thus the "!." expression is used to indicate "match
+the end of file". E.g.:
+ AnyChar = . // match a single character
+ EOF = !.
+
+Code block
+
+Code blocks can be added to generate custom Go code. There are three kinds
+of code blocks: the initializer, the action and the predicate. All code blocks
+appear inside curly braces "{...}".
+
+The initializer must appear first in the grammar, before any rule. It is
+copied as-is (minus the wrapping curly braces) at the top of the generated
+parser. It may contain function declarations, types, variables, etc. just
+like any Go file. Every symbol declared here will be available to all other
+code blocks. Although the initializer is optional in a valid grammar, it is
+usually required to generate a valid Go source code file (for the package
+clause). E.g.:
+ {
+ package main
+
+ func someHelper() {
+ // ...
+ }
+ }
+
+Action code blocks are code blocks declared after an expression in a rule.
+Those code blocks are turned into a method on the "*current" type in the
+generated source code. The method receives any labeled expression's value
+as argument (as interface{}) and must return two values, the first being
+the value of the expression (an interface{}), and the second an error.
+If a non-nil error is returned, it is added to the list of errors that the
+parser will return. E.g.:
+ RuleA = "A"+ {
+ // return the matched string, "c" is the default name for
+ // the *current receiver variable.
+ return string(c.text), nil
+ }
+
+Predicate code blocks are code blocks declared immediately after the and "&"
+or the not "!" operators. Like action code blocks, predicate code blocks
+are turned into a method on the "*current" type in the generated source code.
+The method receives any labeled expression's value as argument (as interface{})
+and must return two values, the first being a bool and the second an error.
+If a non-nil error is returned, it is added to the list of errors that the
+parser will return. E.g.:
+ RuleAB = [ab]i+ &{
+ return true, nil
+ }
+
+The current type is a struct that provides two useful fields that can be
+accessed in action and predicate code blocks: "pos" and "text".
+
+The "pos" field indicates the current position of the parser in the source
+input. It is itself a struct with three fields: "line", "col" and "offset".
+Line is a 1-based line number, col is a 1-based column number that counts
+runes from the start of the line, and offset is a 0-based byte offset.
+
+The "text" field is the slice of bytes of the current match. It is empty
+in a predicate code block.
+
+Using the generated parser
+
+The parser generated by pigeon exports a few symbols so that it can be used
+as a package with public functions to parse input text. The exported API is:
+ - Parse(string, []byte, ...Option) (interface{}, error)
+ - ParseFile(string, ...Option) (interface{}, error)
+ - ParseReader(string, io.Reader, ...Option) (interface{}, error)
+ - Debug(bool) Option
+ - Memoize(bool) Option
+ - Recover(bool) Option
+
+See the godoc page of the generated parser for the test/predicates grammar
+for an example documentation page of the exported API:
+http://godoc.org/github.com/PuerkitoBio/pigeon/test/predicates.
+
+Like the grammar used to generate the parser, the input text must be
+UTF-8-encoded Unicode.
+
+The start rule of the parser is the first rule in the PEG grammar used
+to generate the parser. A call to any of the Parse* functions returns
+the value generated by executing the grammar on the provided input text,
+and an optional error.
+
+Typically, the grammar should generate some kind of abstract syntax tree (AST),
+but for simple grammars it may evaluate the result immediately, such as in
+the examples/calculator example. There are no constraints imposed on the
+author of the grammar, it can return whatever is needed.
+
+Error reporting
+
+When the parser returns a non-nil error, the error is always of type errList,
+which is defined as a slice of errors ([]error). Each error in the list is
+of type *parserError. This is a struct that has an "Inner" field that can be
+used to access the original error.
+
+So if a code block returns some well-known error like:
+ {
+ return nil, io.EOF
+ }
+
+The original error can be accessed this way:
+ _, err := ParseFile("some_file")
+ if err != nil {
+ list := err.(errList)
+ for _, err := range list {
+ pe := err.(*parserError)
+ if pe.Inner == io.EOF {
+ // ...
+ }
+ }
+ }
+
+By default the parser will continue after an error is returned and will
+cumulate all errors found during parsing. If the grammar reaches a point
+where it shouldn't continue, a panic statement can be used to terminate
+parsing. The panic will be caught at the top-level of the Parse* call
+and will be converted into a *parserError like any error, and an errList
+will still be returned to the caller.
+
+The divide by zero error in the examples/calculator grammar leverages this
+feature (no special code is needed to handle division by zero, if it
+happens, the runtime panics and it is recovered and returned as a parsing
+error).
+
+Providing good error reporting in a parser is not a trivial task. Part
+of it is provided by the pigeon tool, by offering features such as
+filename, position and rule name in the error message, but an
+important part of good error reporting needs to be done by the grammar
+author.
+
+For example, many programming languages use double-quotes for string literals.
+Usually, if the opening quote is found, the closing quote is expected, and if
+none is found, there won't be any other rule that will match, there's no need
+to backtrack and try other choices, an error should be added to the list
+and the match should be consumed.
+
+In order to do this, the grammar can look something like this:
+
+ StringLiteral = '"' ValidStringChar* '"' {
+ // this is the valid case, build string literal node
+ // node = ...
+ return node, nil
+ } / '"' ValidStringChar* !'"' {
+ // invalid case, build a replacement string literal node or build a BadNode
+ // node = ...
+ return node, errors.New("string literal not terminated")
+ }
+
+This is just one example, but it illustrates the idea that error reporting
+needs to be thought out when designing the grammar.
+
+API stability
+
+Generated parsers have user-provided code mixed with pigeon code
+in the same package, so there is no package
+boundary in the resulting code to prevent access to unexported symbols.
+What is meant to be implementation
+details in pigeon is also available to user code - which doesn't mean
+it should be used.
+
+For this reason, it is important to precisely define what is intended to be
+the supported API of pigeon, the parts that will be stable
+in future versions.
+
+The "stability" of the API attempts to make a similar guarantee as the
+Go 1 compatibility [5]. The following lists what part of the
+current pigeon code falls under that guarantee (features may be added in
+the future):
+
+ - The pigeon command-line flags and arguments: those will not be removed
+ and will maintain the same semantics.
+
+ - The explicitly exported API generated by pigeon. See [6] for the
+ documentation of this API on a generated parser.
+
+ - The PEG syntax, as documented above.
+
+ - The code blocks (except the initializer) will always be generated as
+ methods on the *current type, and this type is guaranteed to have
+ the fields pos (type position) and text (type []byte). There are no
+ guarantees on other fields and methods of this type.
+
+ - The position type will always have the fields line, col and offset,
+ all defined as int. There are no guarantees on other fields and methods
+ of this type.
+
+ - The type of the error value returned by the Parse* functions, when
+ not nil, will always be errList defined as a []error. There are no
+ guarantees on methods of this type, other than the fact it implements the
+ error interface.
+
+ - Individual errors in the errList will always be of type *parserError,
+ and this type is guaranteed to have an Inner field that contains the
+ original error value. There are no guarantees on other fields and methods
+ of this type.
+
+References:
+
+ [5]: https://golang.org/doc/go1compat
+ [6]: http://godoc.org/github.com/PuerkitoBio/pigeon/test/predicates
+
+*/
+package main
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.go b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.go
new file mode 100644
index 0000000000..69e81fe046
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.go
@@ -0,0 +1,1309 @@
+// Command calculator is a small PEG-generated parser that computes
+// simple math using integers.
+//
+// Example usage: $ calculator "3 + (2 - 5 * 12)"
+//
+// Inspired by pegjs arithmetic example:
+// https://github.com/pegjs/pegjs/blob/master/examples/arithmetics.pegjs
+//
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var ops = map[string]func(int, int) int{ // operator literal -> integer binary function, used by eval
+ "+": func(l, r int) int {
+ return l + r
+ },
+ "-": func(l, r int) int {
+ return l - r
+ },
+ "*": func(l, r int) int {
+ return l * r
+ },
+ "/": func(l, r int) int { // integer division; a zero divisor panics, which the parser recovers into a parse error
+ return l / r
+ },
+}
+
+// for testing purpose
+var cntCodeBlocks int // incremented by every action code block so tests can count invocations
+
+func main() { // entry point: parse and evaluate the arithmetic expression passed as the sole argument
+ if len(os.Args) != 2 { // exactly one expression argument is required
+ log.Fatal("Usage: calculator 'EXPR'")
+ }
+ got, err := ParseReader("", strings.NewReader(os.Args[1])) // empty filename: errors carry position only
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println("=", got)
+}
+
+func toIfaceSlice(v interface{}) []interface{} { // converts a parser result to a slice; nil maps to nil
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{}) // panics if v is non-nil but not a []interface{} (parser always yields one here)
+}
+
+func eval(first, rest interface{}) int { // left-associative fold of first with each (_ op _ term) group in rest
+ l := first.(int)
+ restSl := toIfaceSlice(rest)
+ for _, v := range restSl {
+ restExpr := toIfaceSlice(v)
+ r := restExpr[3].(int) // index 3: the Term/Factor value (grammar seq is: _ Op _ Term)
+ op := restExpr[1].(string) // index 1: the operator literal ("+", "-", "*", "/")
+ l = ops[op](l, r)
+ }
+ return l
+}
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "Input",
+ pos: position{line: 61, col: 1, offset: 1247},
+ expr: &actionExpr{
+ pos: position{line: 61, col: 10, offset: 1256},
+ run: (*parser).callonInput1,
+ expr: &seqExpr{
+ pos: position{line: 61, col: 10, offset: 1256},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 61, col: 10, offset: 1256},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 61, col: 15, offset: 1261},
+ name: "Expr",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 61, col: 20, offset: 1266},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Expr",
+ pos: position{line: 66, col: 1, offset: 1316},
+ expr: &actionExpr{
+ pos: position{line: 66, col: 9, offset: 1324},
+ run: (*parser).callonExpr1,
+ expr: &seqExpr{
+ pos: position{line: 66, col: 9, offset: 1324},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 66, col: 9, offset: 1324},
+ name: "_",
+ },
+ &labeledExpr{
+ pos: position{line: 66, col: 11, offset: 1326},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 66, col: 17, offset: 1332},
+ name: "Term",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 66, col: 22, offset: 1337},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 66, col: 27, offset: 1342},
+ expr: &seqExpr{
+ pos: position{line: 66, col: 29, offset: 1344},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 66, col: 29, offset: 1344},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 66, col: 31, offset: 1346},
+ name: "AddOp",
+ },
+ &ruleRefExpr{
+ pos: position{line: 66, col: 37, offset: 1352},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 66, col: 39, offset: 1354},
+ name: "Term",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 66, col: 47, offset: 1362},
+ name: "_",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Term",
+ pos: position{line: 71, col: 1, offset: 1423},
+ expr: &actionExpr{
+ pos: position{line: 71, col: 9, offset: 1431},
+ run: (*parser).callonTerm1,
+ expr: &seqExpr{
+ pos: position{line: 71, col: 9, offset: 1431},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 71, col: 9, offset: 1431},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 71, col: 15, offset: 1437},
+ name: "Factor",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 71, col: 22, offset: 1444},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 71, col: 27, offset: 1449},
+ expr: &seqExpr{
+ pos: position{line: 71, col: 29, offset: 1451},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 71, col: 29, offset: 1451},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 71, col: 31, offset: 1453},
+ name: "MulOp",
+ },
+ &ruleRefExpr{
+ pos: position{line: 71, col: 37, offset: 1459},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 71, col: 39, offset: 1461},
+ name: "Factor",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Factor",
+ pos: position{line: 76, col: 1, offset: 1530},
+ expr: &choiceExpr{
+ pos: position{line: 76, col: 11, offset: 1540},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 76, col: 11, offset: 1540},
+ run: (*parser).callonFactor2,
+ expr: &seqExpr{
+ pos: position{line: 76, col: 11, offset: 1540},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 76, col: 11, offset: 1540},
+ val: "(",
+ ignoreCase: false,
+ },
+ &labeledExpr{
+ pos: position{line: 76, col: 15, offset: 1544},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 76, col: 20, offset: 1549},
+ name: "Expr",
+ },
+ },
+ &litMatcher{
+ pos: position{line: 76, col: 25, offset: 1554},
+ val: ")",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 79, col: 5, offset: 1605},
+ run: (*parser).callonFactor8,
+ expr: &labeledExpr{
+ pos: position{line: 79, col: 5, offset: 1605},
+ label: "integer",
+ expr: &ruleRefExpr{
+ pos: position{line: 79, col: 13, offset: 1613},
+ name: "Integer",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "AddOp",
+ pos: position{line: 84, col: 1, offset: 1670},
+ expr: &actionExpr{
+ pos: position{line: 84, col: 10, offset: 1679},
+ run: (*parser).callonAddOp1,
+ expr: &choiceExpr{
+ pos: position{line: 84, col: 12, offset: 1681},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 84, col: 12, offset: 1681},
+ val: "+",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 84, col: 18, offset: 1687},
+ val: "-",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "MulOp",
+ pos: position{line: 89, col: 1, offset: 1749},
+ expr: &actionExpr{
+ pos: position{line: 89, col: 10, offset: 1758},
+ run: (*parser).callonMulOp1,
+ expr: &choiceExpr{
+ pos: position{line: 89, col: 12, offset: 1760},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 89, col: 12, offset: 1760},
+ val: "*",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 89, col: 18, offset: 1766},
+ val: "/",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Integer",
+ pos: position{line: 94, col: 1, offset: 1828},
+ expr: &actionExpr{
+ pos: position{line: 94, col: 12, offset: 1839},
+ run: (*parser).callonInteger1,
+ expr: &seqExpr{
+ pos: position{line: 94, col: 12, offset: 1839},
+ exprs: []interface{}{
+ &zeroOrOneExpr{
+ pos: position{line: 94, col: 12, offset: 1839},
+ expr: &litMatcher{
+ pos: position{line: 94, col: 12, offset: 1839},
+ val: "-",
+ ignoreCase: false,
+ },
+ },
+ &oneOrMoreExpr{
+ pos: position{line: 94, col: 17, offset: 1844},
+ expr: &charClassMatcher{
+ pos: position{line: 94, col: 17, offset: 1844},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "_",
+ displayName: "\"whitespace\"",
+ pos: position{line: 99, col: 1, offset: 1916},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 99, col: 19, offset: 1934},
+ expr: &charClassMatcher{
+ pos: position{line: 99, col: 19, offset: 1934},
+ val: "[ \\n\\t\\r]",
+ chars: []rune{' ', '\n', '\t', '\r'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 101, col: 1, offset: 1946},
+ expr: ¬Expr{
+ pos: position{line: 101, col: 8, offset: 1953},
+ expr: &anyMatcher{
+ line: 101, col: 9, offset: 1954,
+ },
+ },
+ },
+ },
+}
+
+func (c *current) onInput1(expr interface{}) (interface{}, error) {
+ cntCodeBlocks++
+ return expr, nil
+}
+
+func (p *parser) callonInput1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onInput1(stack["expr"])
+}
+
+func (c *current) onExpr1(first, rest interface{}) (interface{}, error) {
+ cntCodeBlocks++
+ return eval(first, rest), nil
+}
+
+func (p *parser) callonExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onExpr1(stack["first"], stack["rest"])
+}
+
+func (c *current) onTerm1(first, rest interface{}) (interface{}, error) {
+ cntCodeBlocks++
+ return eval(first, rest), nil
+}
+
+func (p *parser) callonTerm1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onTerm1(stack["first"], stack["rest"])
+}
+
+func (c *current) onFactor2(expr interface{}) (interface{}, error) {
+ cntCodeBlocks++
+ return expr, nil
+}
+
+func (p *parser) callonFactor2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onFactor2(stack["expr"])
+}
+
+func (c *current) onFactor8(integer interface{}) (interface{}, error) {
+ cntCodeBlocks++
+ return integer, nil
+}
+
+func (p *parser) callonFactor8() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onFactor8(stack["integer"])
+}
+
+func (c *current) onAddOp1() (interface{}, error) {
+ cntCodeBlocks++
+ return string(c.text), nil
+}
+
+func (p *parser) callonAddOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onAddOp1()
+}
+
+func (c *current) onMulOp1() (interface{}, error) {
+ cntCodeBlocks++
+ return string(c.text), nil
+}
+
+func (p *parser) callonMulOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onMulOp1()
+}
+
+func (c *current) onInteger1() (interface{}, error) {
+ cntCodeBlocks++
+ return strconv.Atoi(string(c.text))
+}
+
+func (p *parser) callonInteger1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onInteger1()
+}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+func (p position) String() string {
+ return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune
+ w int
+}
+
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
+// the AST types...
+
+type grammar struct {
+ pos position
+ rules []*rule
+}
+
+type rule struct {
+ pos position
+ name string
+ displayName string
+ expr interface{}
+}
+
+type choiceExpr struct {
+ pos position
+ alternatives []interface{}
+}
+
+type actionExpr struct {
+ pos position
+ expr interface{}
+ run func(*parser) (interface{}, error)
+}
+
+type seqExpr struct {
+ pos position
+ exprs []interface{}
+}
+
+type labeledExpr struct {
+ pos position
+ label string
+ expr interface{}
+}
+
+type expr struct {
+ pos position
+ expr interface{}
+}
+
+type andExpr expr
+type notExpr expr
+type zeroOrOneExpr expr
+type zeroOrMoreExpr expr
+type oneOrMoreExpr expr
+
+type ruleRefExpr struct {
+ pos position
+ name string
+}
+
+type andCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type notCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type litMatcher struct {
+ pos position
+ val string
+ ignoreCase bool
+}
+
+type charClassMatcher struct {
+ pos position
+ val string
+ chars []rune
+ ranges []rune
+ classes []*unicode.RangeTable
+ ignoreCase bool
+ inverted bool
+}
+
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+// add appends err to the list.
+func (e *errList) add(err error) {
+ *e = append(*e, err)
+}
+
+// err returns the deduplicated list as an error, or nil when empty.
+func (e errList) err() error {
+ if len(e) == 0 {
+ return nil
+ }
+ e.dedupe()
+ return e
+}
+
+// dedupe drops errors whose message duplicates an earlier one,
+// preserving first-seen order.
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+// Error implements the error interface, joining all messages with
+// newlines.
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+ Inner error
+ pos position
+ prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+ return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+// Panic recovery is on by default; line numbering starts at 1.
+func newParser(filename string, b []byte, opts ...Option) *parser {
+ p := &parser{
+ filename: filename,
+ errs: new(errList),
+ data: b,
+ pt: savepoint{position: position{line: 1}},
+ recover: true,
+ }
+ p.setOptions(opts)
+ return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
+// resultTuple is the memoized outcome of evaluating an expression:
+// its value, whether it matched, and the position reached.
+type resultTuple struct {
+ v interface{}
+ b bool
+ end savepoint
+}
+
+// parser holds all state for a single parse of data.
+type parser struct {
+ filename string // used only in error messages
+ pt savepoint // current position, rune and rune width
+ cur current // state exposed to action code blocks
+
+ data []byte
+ errs *errList
+
+ recover bool // convert panics in action code to errors
+ debug bool // emit trace output via print/in/out
+ depth int // current trace indentation depth
+
+ memoize bool
+ // memoization table for the packrat algorithm:
+ // map[offset in source] map[expression or rule] {value, match}
+ memo map[int]map[interface{}]resultTuple
+
+ // rules table, maps the rule identifier to the rule node
+ rules map[string]*rule
+ // variables stack, map of label to value
+ vstack []map[string]interface{}
+ // rule stack, allows identification of the current rule in errors
+ rstack []*rule
+
+ // stats
+ exprCnt int // number of expressions evaluated
+}
+
+// push a variable set on the vstack. Empty maps left behind by popV
+// are reused to avoid reallocating on every push.
+func (p *parser) pushV() {
+ if cap(p.vstack) == len(p.vstack) {
+ // create new empty slot in the stack
+ p.vstack = append(p.vstack, nil)
+ } else {
+ // slice to 1 more
+ p.vstack = p.vstack[:len(p.vstack)+1]
+ }
+
+ // get the last args set
+ m := p.vstack[len(p.vstack)-1]
+ if m != nil && len(m) == 0 {
+ // empty map, all good
+ return
+ }
+
+ m = make(map[string]interface{})
+ p.vstack[len(p.vstack)-1] = m
+}
+
+// pop a variable set from the vstack. Non-empty maps are dropped so
+// the GC can reclaim them; empty ones stay for reuse by pushV.
+func (p *parser) popV() {
+ // if the map is not empty, clear it
+ m := p.vstack[len(p.vstack)-1]
+ if len(m) > 0 {
+ // GC that map
+ p.vstack[len(p.vstack)-1] = nil
+ }
+ p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+// print emits a debug trace line for the current position and returns
+// s unchanged; it is a no-op unless the debug option is enabled.
+func (p *parser) print(prefix, s string) string {
+ if !p.debug {
+ return s
+ }
+
+ fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
+ prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+ return s
+}
+
+// in increments the trace depth and logs entry into s.
+func (p *parser) in(s string) string {
+ p.depth++
+ return p.print(strings.Repeat(" ", p.depth)+">", s)
+}
+
+// out decrements the trace depth and logs exit from s; typically used
+// as defer p.out(p.in("name")).
+func (p *parser) out(s string) string {
+ p.depth--
+ return p.print(strings.Repeat(" ", p.depth)+"<", s)
+}
+
+// addErr records err at the parser's current position.
+func (p *parser) addErr(err error) {
+ p.addErrAt(err, p.pt.position)
+}
+
+// addErrAt wraps err in a parserError prefixed with the filename,
+// the position, and (when available) the rule currently being parsed,
+// then appends it to the error list.
+func (p *parser) addErrAt(err error, pos position) {
+ var buf bytes.Buffer
+ if p.filename != "" {
+ buf.WriteString(p.filename)
+ }
+ if buf.Len() > 0 {
+ buf.WriteString(":")
+ }
+ buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
+ if len(p.rstack) > 0 {
+ if buf.Len() > 0 {
+ buf.WriteString(": ")
+ }
+ rule := p.rstack[len(p.rstack)-1]
+ if rule.displayName != "" {
+ buf.WriteString("rule " + rule.displayName)
+ } else {
+ buf.WriteString("rule " + rule.name)
+ }
+ }
+ pe := &parserError{Inner: err, prefix: buf.String()}
+ p.errs.add(pe)
+}
+
+// read advances the parser to the next rune, updating line/col and
+// reporting invalid UTF-8 as an error.
+func (p *parser) read() {
+ p.pt.offset += p.pt.w
+ rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+ p.pt.rn = rn
+ p.pt.w = n
+ p.pt.col++
+ if rn == '\n' {
+ p.pt.line++
+ p.pt.col = 0
+ }
+
+ if rn == utf8.RuneError {
+ if n > 0 {
+ // n > 0 means actual invalid bytes; n == 0 is plain EOF
+ p.addErr(errInvalidEncoding)
+ }
+ }
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+ if p.debug {
+ defer p.out(p.in("restore"))
+ }
+ if pt.offset == p.pt.offset {
+ return
+ }
+ p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+ return p.data[start.position.offset:p.pt.position.offset]
+}
+
+// getMemoized looks up the memoized result for node at the current
+// offset; the second return reports whether an entry exists.
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
+// setMemoized records tuple for node at pt's offset, lazily creating
+// the memoization tables on first use.
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+ if p.memo == nil {
+ p.memo = make(map[int]map[interface{}]resultTuple)
+ }
+ m := p.memo[pt.offset]
+ if m == nil {
+ m = make(map[interface{}]resultTuple)
+ p.memo[pt.offset] = m
+ }
+ m[node] = tuple
+}
+
+// buildRulesTable indexes the grammar's rules by name so rule
+// references can be resolved in constant time.
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
+// parse runs the parser over grammar g and returns the value produced
+// by the start rule (rule 0). All errors found are returned as an
+// errList; with recover enabled, panics in action code are converted
+// to errors instead of crashing.
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+ if len(g.rules) == 0 {
+ p.addErr(errNoRule)
+ return nil, p.errs.err()
+ }
+
+ // TODO : not super critical but this could be generated
+ p.buildRulesTable(g)
+
+ if p.recover {
+ // panic can be used in action code to stop parsing immediately
+ // and return the panic as an error.
+ defer func() {
+ if e := recover(); e != nil {
+ if p.debug {
+ defer p.out(p.in("panic handler"))
+ }
+ val = nil
+ switch e := e.(type) {
+ case error:
+ p.addErr(e)
+ default:
+ p.addErr(fmt.Errorf("%v", e))
+ }
+ err = p.errs.err()
+ }
+ }()
+ }
+
+ // start rule is rule [0]
+ p.read() // advance to first rune
+ val, ok := p.parseRule(g.rules[0])
+ if !ok {
+ if len(*p.errs) == 0 {
+ // make sure this doesn't go out silently
+ p.addErr(errNoMatch)
+ }
+ return nil, p.errs.err()
+ }
+ return val, p.errs.err()
+}
+
+// parseRule parses a single rule, consulting and updating the
+// memoization table when enabled, and maintaining the rule stack used
+// for error messages.
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRule " + rule.name))
+ }
+
+ if p.memoize {
+ res, ok := p.getMemoized(rule)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ }
+
+ start := p.pt
+ p.rstack = append(p.rstack, rule)
+ p.pushV()
+ val, ok := p.parseExpr(rule.expr)
+ p.popV()
+ p.rstack = p.rstack[:len(p.rstack)-1]
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+
+ if p.memoize {
+ p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+// parseExpr dispatches on the expression's concrete type. An unknown
+// type means the generator produced an AST node this runtime does not
+// know, so it panics.
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+ var pt savepoint
+ var ok bool
+
+ if p.memoize {
+ res, ok := p.getMemoized(expr)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ pt = p.pt
+ }
+
+ p.exprCnt++
+ var val interface{}
+ switch expr := expr.(type) {
+ case *actionExpr:
+ val, ok = p.parseActionExpr(expr)
+ case *andCodeExpr:
+ val, ok = p.parseAndCodeExpr(expr)
+ case *andExpr:
+ val, ok = p.parseAndExpr(expr)
+ case *anyMatcher:
+ val, ok = p.parseAnyMatcher(expr)
+ case *charClassMatcher:
+ val, ok = p.parseCharClassMatcher(expr)
+ case *choiceExpr:
+ val, ok = p.parseChoiceExpr(expr)
+ case *labeledExpr:
+ val, ok = p.parseLabeledExpr(expr)
+ case *litMatcher:
+ val, ok = p.parseLitMatcher(expr)
+ case *notCodeExpr:
+ val, ok = p.parseNotCodeExpr(expr)
+ case *notExpr:
+ val, ok = p.parseNotExpr(expr)
+ case *oneOrMoreExpr:
+ val, ok = p.parseOneOrMoreExpr(expr)
+ case *ruleRefExpr:
+ val, ok = p.parseRuleRefExpr(expr)
+ case *seqExpr:
+ val, ok = p.parseSeqExpr(expr)
+ case *zeroOrMoreExpr:
+ val, ok = p.parseZeroOrMoreExpr(expr)
+ case *zeroOrOneExpr:
+ val, ok = p.parseZeroOrOneExpr(expr)
+ default:
+ panic(fmt.Sprintf("unknown expression type %T", expr))
+ }
+ if p.memoize {
+ p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+// parseActionExpr parses the inner expression and, on a match, runs
+// the associated code block with p.cur set to the match's position
+// and raw text; the block's return value replaces the match value.
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseActionExpr"))
+ }
+
+ start := p.pt
+ val, ok := p.parseExpr(act.expr)
+ if ok {
+ p.cur.pos = start.position
+ p.cur.text = p.sliceFrom(start)
+ actVal, err := act.run(p)
+ if err != nil {
+ p.addErrAt(err, start.position)
+ }
+ val = actVal
+ }
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+ return val, ok
+}
+
+// parseAndCodeExpr evaluates an &{...} code predicate; it consumes no
+// input and matches iff the code returns true.
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndCodeExpr"))
+ }
+
+ ok, err := and.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, ok
+}
+
+// parseAndExpr is the & predicate: it tries the inner expression and
+// restores the position regardless of the outcome.
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(and.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, ok
+}
+
+// parseAnyMatcher matches any single rune; it fails at EOF or on an
+// invalid rune (both represented by utf8.RuneError).
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+// parseCharClassMatcher matches the current rune against the class's
+// explicit chars, lo/hi range pairs and Unicode tables, honoring the
+// ignoreCase and inverted flags.
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseCharClassMatcher"))
+ }
+
+ cur := p.pt.rn
+ // can't match EOF
+ if cur == utf8.RuneError {
+ return nil, false
+ }
+ start := p.pt
+ if chr.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+
+ // try to match in the list of available chars
+ for _, rn := range chr.chars {
+ if rn == cur {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of ranges (pairs of lo, hi bounds)
+ for i := 0; i < len(chr.ranges); i += 2 {
+ if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of Unicode classes
+ for _, cl := range chr.classes {
+ if unicode.Is(cl, cur) {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // nothing matched: an inverted class succeeds, a normal one fails
+ if chr.inverted {
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+// parseChoiceExpr tries each alternative in order and returns the
+// first match (ordered choice — no backtracking across a success).
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
+// parseLabeledExpr parses the inner expression and, on success, binds
+// the value under its label in the current variable set.
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLabeledExpr"))
+ }
+
+ p.pushV()
+ val, ok := p.parseExpr(lab.expr)
+ p.popV()
+ if ok && lab.label != "" {
+ m := p.vstack[len(p.vstack)-1]
+ m[lab.label] = val
+ }
+ return val, ok
+}
+
+// parseLitMatcher matches the literal rune by rune, restoring the
+// start position on the first mismatch.
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLitMatcher"))
+ }
+
+ start := p.pt
+ for _, want := range lit.val {
+ cur := p.pt.rn
+ if lit.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+ if cur != want {
+ p.restore(start)
+ return nil, false
+ }
+ p.read()
+ }
+ return p.sliceFrom(start), true
+}
+
+// parseNotCodeExpr evaluates a !{...} code predicate; it consumes no
+// input and matches iff the code returns false.
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotCodeExpr"))
+ }
+
+ ok, err := not.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, !ok
+}
+
+// parseNotExpr is the ! predicate: it succeeds iff the inner
+// expression fails, and restores the position either way.
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(not.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, !ok
+}
+
+// parseOneOrMoreExpr greedily matches expr as many times as possible,
+// requiring at least one match.
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseOneOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ if len(vals) == 0 {
+ // did not match once, no match
+ return nil, false
+ }
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+// parseRuleRefExpr resolves the referenced rule by name and parses it;
+// a reference to an undefined rule is recorded as an error.
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRuleRefExpr " + ref.name))
+ }
+
+ if ref.name == "" {
+ panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+ }
+
+ rule := p.rules[ref.name]
+ if rule == nil {
+ p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+ return nil, false
+ }
+ return p.parseRule(rule)
+}
+
+// parseSeqExpr matches every expression in order; if any fails, the
+// position is restored to the start of the sequence.
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseSeqExpr"))
+ }
+
+ var vals []interface{}
+
+ pt := p.pt
+ for _, expr := range seq.exprs {
+ val, ok := p.parseExpr(expr)
+ if !ok {
+ p.restore(pt)
+ return nil, false
+ }
+ vals = append(vals, val)
+ }
+ return vals, true
+}
+
+// parseZeroOrMoreExpr greedily matches expr as many times as possible;
+// it always succeeds, possibly with zero matches.
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+// parseZeroOrOneExpr matches expr at most once and always succeeds.
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+// rangeTable resolves a Unicode class name — category, property or
+// script — to its RangeTable. The generator only emits valid class
+// names, so an unknown name is a bug and panics.
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.peg b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.peg
new file mode 100644
index 0000000000..63ddf85aed
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator.peg
@@ -0,0 +1,101 @@
+{
+// Command calculator is a small PEG-generated parser that computes
+// simple math using integers.
+//
+// Example usage: $ calculator "3 + (2 - 5 * 12)"
+//
+// Inspired by pegjs arithmetic example:
+// https://github.com/pegjs/pegjs/blob/master/examples/arithmetics.pegjs
+//
+package main
+
+// ops maps an operator token to its integer implementation.
+var ops = map[string]func(int, int) int {
+ "+": func(l, r int) int {
+ return l + r
+ },
+ "-": func(l, r int) int {
+ return l - r
+ },
+ "*": func(l, r int) int {
+ return l * r
+ },
+ "/": func(l, r int) int {
+ return l / r
+ },
+}
+
+// for testing purpose
+var cntCodeBlocks int
+
+func main() {
+ if len(os.Args) != 2 {
+ log.Fatal("Usage: calculator 'EXPR'")
+ }
+ got, err := ParseReader("", strings.NewReader(os.Args[1]))
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println("=", got)
+}
+
+// toIfaceSlice converts a parse result to a slice, treating nil as empty.
+func toIfaceSlice(v interface{}) []interface{} {
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{})
+}
+
+// eval left-folds the (op, operand) pairs in rest onto first.
+func eval(first, rest interface{}) int {
+ l := first.(int)
+ restSl := toIfaceSlice(rest)
+ for _, v := range restSl {
+ restExpr := toIfaceSlice(v)
+ r := restExpr[3].(int)
+ op := restExpr[1].(string)
+ l = ops[op](l, r)
+ }
+ return l
+}
+}
+
+// Input is the start rule: a single expression terminated by EOF.
+Input <- expr:Expr EOF {
+ cntCodeBlocks++
+ return expr, nil
+}
+
+// Expr handles + and - with left associativity.
+Expr <- _ first:Term rest:( _ AddOp _ Term )* _ {
+ cntCodeBlocks++
+ return eval(first, rest), nil
+}
+
+// Term handles * and /, binding tighter than AddOp.
+Term <- first:Factor rest:( _ MulOp _ Factor )* {
+ cntCodeBlocks++
+ return eval(first, rest), nil
+}
+
+Factor <- '(' expr:Expr ')' {
+ cntCodeBlocks++
+ return expr, nil
+} / integer:Integer {
+ cntCodeBlocks++
+ return integer, nil
+}
+
+AddOp <- ( '+' / '-' ) {
+ cntCodeBlocks++
+ return string(c.text), nil
+}
+
+MulOp <- ( '*' / '/' ) {
+ cntCodeBlocks++
+ return string(c.text), nil
+}
+
+Integer <- '-'? [0-9]+ {
+ cntCodeBlocks++
+ return strconv.Atoi(string(c.text))
+}
+
+_ "whitespace" <- [ \n\t\r]*
+
+EOF <- !.
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator_test.go b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator_test.go
new file mode 100644
index 0000000000..70a0767389
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/calculator/calculator_test.go
@@ -0,0 +1,176 @@
+package main
+
+import "testing"
+
+// longishExpr exercises nesting, unary minus and all four operators.
+var longishExpr = `
+18 + 3 - 27012 * ( (1234 - 43) / 7 ) + -4 * 8129
+`
+
+// validCases maps input expressions to their expected integer result.
+var validCases = map[string]int{
+ "0": 0,
+ "1": 1,
+ "-1": -1,
+ "10": 10,
+ "-10": -10,
+
+ "(0)": 0,
+ "(1)": 1,
+ "(-1)": -1,
+ "(10)": 10,
+ "(-10)": -10,
+
+ "1+1": 2,
+ "1-1": 0,
+ "1*1": 1,
+ "1/1": 1,
+ "1 + 1": 2,
+ "1 - 1": 0,
+ "1 * 1": 1,
+ "1 / 1": 1,
+
+ "1+0": 1,
+ "1-0": 1,
+ "1*0": 0,
+ "1 + 0": 1,
+ "1 - 0": 1,
+ "1 * 0": 0,
+
+ "1\n+\t2\r\n +\n3\n": 6,
+ "(2) * 3": 6,
+
+ " 1 + 2 - 3 * 4 / 5 ": 1,
+ " 1 + (2 - 3) * 4 / 5 ": 1,
+ " (1 + 2 - 3) * 4 / 5 ": 0,
+ " 1 + 2 - (3 * 4) / 5 ": 1,
+ " 18 + 3 - 27 * (-18 / -3)": -141,
+ longishExpr: -4624535,
+}
+
+// TestValidCases checks that each valid expression parses without
+// error and evaluates to the expected int.
+func TestValidCases(t *testing.T) {
+ for tc, exp := range validCases {
+ got, err := Parse("", []byte(tc))
+ if err != nil {
+ t.Errorf("%q: want no error, got %v", tc, err)
+ continue
+ }
+ goti, ok := got.(int)
+ if !ok {
+ t.Errorf("%q: want type %T, got %T", tc, exp, got)
+ continue
+ }
+ if exp != goti {
+ t.Errorf("%q: want %d, got %d", tc, exp, goti)
+ }
+ }
+}
+
+// invalidCases maps inputs that must fail to their exact error string
+// (position, offset and message as produced by addErrAt).
+var invalidCases = map[string]string{
+ "": "1:1 (0): no match found",
+ "(": "1:1 (0): no match found",
+ ")": "1:1 (0): no match found",
+ "()": "1:1 (0): no match found",
+ "+": "1:1 (0): no match found",
+ "-": "1:1 (0): no match found",
+ "*": "1:1 (0): no match found",
+ "/": "1:1 (0): no match found",
+ "+1": "1:1 (0): no match found",
+ "*1": "1:1 (0): no match found",
+ "/1": "1:1 (0): no match found",
+ "1/0": "1:4 (3): rule Term: runtime error: integer divide by zero",
+ "1+": "1:1 (0): no match found",
+ "1-": "1:1 (0): no match found",
+ "1*": "1:1 (0): no match found",
+ "1/": "1:1 (0): no match found",
+ "1 (+ 2)": "1:1 (0): no match found",
+ "1 (2)": "1:1 (0): no match found",
+ "\xfe": "1:1 (0): invalid encoding",
+}
+
+// TestInvalidCases checks that invalid inputs fail with an errList of
+// *parserError values and the exact expected message.
+func TestInvalidCases(t *testing.T) {
+ for tc, exp := range invalidCases {
+ got, err := Parse("", []byte(tc))
+ if err == nil {
+ t.Errorf("%q: want error, got none (%v)", tc, got)
+ continue
+ }
+ el, ok := err.(errList)
+ if !ok {
+ t.Errorf("%q: want error type %T, got %T", tc, &errList{}, err)
+ continue
+ }
+ for _, e := range el {
+ if _, ok := e.(*parserError); !ok {
+ t.Errorf("%q: want all individual errors to be %T, got %T (%[3]v)", tc, &parserError{}, e)
+ }
+ }
+ if exp != err.Error() {
+ t.Errorf("%q: want \n%s\n, got \n%s\n", tc, exp, err)
+ }
+ }
+}
+
+// TestPanicNoRecover checks that with Recover(false), a panic raised
+// during parsing (divide by zero in action code) propagates.
+func TestPanicNoRecover(t *testing.T) {
+ defer func() {
+ if e := recover(); e != nil {
+ // all good
+ return
+ }
+ t.Fatal("want panic, got none")
+ }()
+
+ // should panic
+ Parse("", []byte("1 / 0"), Recover(false))
+}
+
+// TestMemoization parses the same input with memoization off and on,
+// checking both the result and the exact expression-evaluation counts
+// (memoization must reduce the count without changing the value).
+func TestMemoization(t *testing.T) {
+ in := " 2 + 35 * ( 18 - -4 / ( 5 + 1) ) * 456 + -1"
+ want := 287281
+
+ p := newParser("", []byte(in), Memoize(false))
+ got, err := p.parse(g)
+ if err != nil {
+ t.Fatal(err)
+ }
+ goti := got.(int)
+ if goti != want {
+ t.Errorf("want %d, got %d", want, goti)
+ }
+ if p.exprCnt != 415 {
+ t.Errorf("with Memoize=false, want %d expressions evaluated, got %d", 415, p.exprCnt)
+ }
+
+ p = newParser("", []byte(in), Memoize(true))
+ got, err = p.parse(g)
+ if err != nil {
+ t.Fatal(err)
+ }
+ goti = got.(int)
+ if goti != want {
+ t.Errorf("want %d, got %d", want, goti)
+ }
+ if p.exprCnt != 389 {
+ t.Errorf("with Memoize=true, want %d expressions evaluated, got %d", 389, p.exprCnt)
+ }
+}
+
+// BenchmarkPigeonCalculatorNoMemo measures parsing without packrat
+// memoization.
+func BenchmarkPigeonCalculatorNoMemo(b *testing.B) {
+ d := []byte(longishExpr)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(false)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkPigeonCalculatorMemo measures parsing with packrat
+// memoization enabled, for comparison with the NoMemo variant.
+func BenchmarkPigeonCalculatorMemo(b *testing.B) {
+ d := []byte(longishExpr)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(true)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.go b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.go
new file mode 100644
index 0000000000..c06cb29ce6
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.go
@@ -0,0 +1,1624 @@
+// Command json parses JSON as defined by [1].
+//
+// BUGS: the escaped forward solidus (`\/`) is not currently handled.
+//
+// [1]: http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// main parses JSON from the file named on the command line, or from
+// stdin when no argument is given, and prints the parsed value.
+func main() {
+ in := os.Stdin
+ nm := "stdin"
+ if len(os.Args) > 1 {
+ f, err := os.Open(os.Args[1])
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ in = f
+ nm = os.Args[1]
+ }
+
+ got, err := ParseReader(nm, in)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(got)
+}
+
+// toIfaceSlice converts a parse result to a slice, treating nil as an
+// empty (nil) slice rather than panicking on the type assertion.
+func toIfaceSlice(v interface{}) []interface{} {
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{})
+}
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "JSON",
+ pos: position{line: 37, col: 1, offset: 702},
+ expr: &actionExpr{
+ pos: position{line: 37, col: 8, offset: 711},
+ run: (*parser).callonJSON1,
+ expr: &seqExpr{
+ pos: position{line: 37, col: 8, offset: 711},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 37, col: 8, offset: 711},
+ name: "_",
+ },
+ &labeledExpr{
+ pos: position{line: 37, col: 10, offset: 713},
+ label: "vals",
+ expr: &oneOrMoreExpr{
+ pos: position{line: 37, col: 15, offset: 718},
+ expr: &ruleRefExpr{
+ pos: position{line: 37, col: 15, offset: 718},
+ name: "Value",
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 37, col: 22, offset: 725},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Value",
+ pos: position{line: 49, col: 1, offset: 916},
+ expr: &actionExpr{
+ pos: position{line: 49, col: 9, offset: 926},
+ run: (*parser).callonValue1,
+ expr: &seqExpr{
+ pos: position{line: 49, col: 9, offset: 926},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 49, col: 9, offset: 926},
+ label: "val",
+ expr: &choiceExpr{
+ pos: position{line: 49, col: 15, offset: 932},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 49, col: 15, offset: 932},
+ name: "Object",
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 24, offset: 941},
+ name: "Array",
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 32, offset: 949},
+ name: "Number",
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 41, offset: 958},
+ name: "String",
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 50, offset: 967},
+ name: "Bool",
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 57, offset: 974},
+ name: "Null",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 49, col: 64, offset: 981},
+ name: "_",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Object",
+ pos: position{line: 53, col: 1, offset: 1008},
+ expr: &actionExpr{
+ pos: position{line: 53, col: 10, offset: 1019},
+ run: (*parser).callonObject1,
+ expr: &seqExpr{
+ pos: position{line: 53, col: 10, offset: 1019},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 53, col: 10, offset: 1019},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 14, offset: 1023},
+ name: "_",
+ },
+ &labeledExpr{
+ pos: position{line: 53, col: 16, offset: 1025},
+ label: "vals",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 53, col: 21, offset: 1030},
+ expr: &seqExpr{
+ pos: position{line: 53, col: 23, offset: 1032},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 53, col: 23, offset: 1032},
+ name: "String",
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 30, offset: 1039},
+ name: "_",
+ },
+ &litMatcher{
+ pos: position{line: 53, col: 32, offset: 1041},
+ val: ":",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 36, offset: 1045},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 38, offset: 1047},
+ name: "Value",
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 53, col: 44, offset: 1053},
+ expr: &seqExpr{
+ pos: position{line: 53, col: 46, offset: 1055},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 53, col: 46, offset: 1055},
+ val: ",",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 50, offset: 1059},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 52, offset: 1061},
+ name: "String",
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 59, offset: 1068},
+ name: "_",
+ },
+ &litMatcher{
+ pos: position{line: 53, col: 61, offset: 1070},
+ val: ":",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 65, offset: 1074},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 53, col: 67, offset: 1076},
+ name: "Value",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 53, col: 79, offset: 1088},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Array",
+ pos: position{line: 68, col: 1, offset: 1430},
+ expr: &actionExpr{
+ pos: position{line: 68, col: 9, offset: 1440},
+ run: (*parser).callonArray1,
+ expr: &seqExpr{
+ pos: position{line: 68, col: 9, offset: 1440},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 68, col: 9, offset: 1440},
+ val: "[",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 68, col: 13, offset: 1444},
+ name: "_",
+ },
+ &labeledExpr{
+ pos: position{line: 68, col: 15, offset: 1446},
+ label: "vals",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 68, col: 20, offset: 1451},
+ expr: &seqExpr{
+ pos: position{line: 68, col: 22, offset: 1453},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 68, col: 22, offset: 1453},
+ name: "Value",
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 68, col: 28, offset: 1459},
+ expr: &seqExpr{
+ pos: position{line: 68, col: 30, offset: 1461},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 68, col: 30, offset: 1461},
+ val: ",",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 68, col: 34, offset: 1465},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 68, col: 36, offset: 1467},
+ name: "Value",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 68, col: 48, offset: 1479},
+ val: "]",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Number",
+ pos: position{line: 82, col: 1, offset: 1785},
+ expr: &actionExpr{
+ pos: position{line: 82, col: 10, offset: 1796},
+ run: (*parser).callonNumber1,
+ expr: &seqExpr{
+ pos: position{line: 82, col: 10, offset: 1796},
+ exprs: []interface{}{
+ &zeroOrOneExpr{
+ pos: position{line: 82, col: 10, offset: 1796},
+ expr: &litMatcher{
+ pos: position{line: 82, col: 10, offset: 1796},
+ val: "-",
+ ignoreCase: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 82, col: 15, offset: 1801},
+ name: "Integer",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 82, col: 23, offset: 1809},
+ expr: &seqExpr{
+ pos: position{line: 82, col: 25, offset: 1811},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 82, col: 25, offset: 1811},
+ val: ".",
+ ignoreCase: false,
+ },
+ &oneOrMoreExpr{
+ pos: position{line: 82, col: 29, offset: 1815},
+ expr: &ruleRefExpr{
+ pos: position{line: 82, col: 29, offset: 1815},
+ name: "DecimalDigit",
+ },
+ },
+ },
+ },
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 82, col: 46, offset: 1832},
+ expr: &ruleRefExpr{
+ pos: position{line: 82, col: 46, offset: 1832},
+ name: "Exponent",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Integer",
+ pos: position{line: 88, col: 1, offset: 1987},
+ expr: &choiceExpr{
+ pos: position{line: 88, col: 11, offset: 1999},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 88, col: 11, offset: 1999},
+ val: "0",
+ ignoreCase: false,
+ },
+ &seqExpr{
+ pos: position{line: 88, col: 17, offset: 2005},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 88, col: 17, offset: 2005},
+ name: "NonZeroDecimalDigit",
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 88, col: 37, offset: 2025},
+ expr: &ruleRefExpr{
+ pos: position{line: 88, col: 37, offset: 2025},
+ name: "DecimalDigit",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Exponent",
+ pos: position{line: 90, col: 1, offset: 2040},
+ expr: &seqExpr{
+ pos: position{line: 90, col: 12, offset: 2053},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 90, col: 12, offset: 2053},
+ val: "e",
+ ignoreCase: true,
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 90, col: 17, offset: 2058},
+ expr: &charClassMatcher{
+ pos: position{line: 90, col: 17, offset: 2058},
+ val: "[+-]",
+ chars: []rune{'+', '-'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ &oneOrMoreExpr{
+ pos: position{line: 90, col: 23, offset: 2064},
+ expr: &ruleRefExpr{
+ pos: position{line: 90, col: 23, offset: 2064},
+ name: "DecimalDigit",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "String",
+ pos: position{line: 92, col: 1, offset: 2079},
+ expr: &actionExpr{
+ pos: position{line: 92, col: 10, offset: 2090},
+ run: (*parser).callonString1,
+ expr: &seqExpr{
+ pos: position{line: 92, col: 10, offset: 2090},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 92, col: 10, offset: 2090},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 92, col: 14, offset: 2094},
+ expr: &choiceExpr{
+ pos: position{line: 92, col: 16, offset: 2096},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 92, col: 16, offset: 2096},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 92, col: 16, offset: 2096},
+ expr: &ruleRefExpr{
+ pos: position{line: 92, col: 17, offset: 2097},
+ name: "EscapedChar",
+ },
+ },
+ &anyMatcher{
+ line: 92, col: 29, offset: 2109,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 92, col: 33, offset: 2113},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 92, col: 33, offset: 2113},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 92, col: 38, offset: 2118},
+ name: "EscapeSequence",
+ },
+ },
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 92, col: 56, offset: 2136},
+ val: "\"",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "EscapedChar",
+ pos: position{line: 98, col: 1, offset: 2308},
+ expr: &charClassMatcher{
+ pos: position{line: 98, col: 15, offset: 2324},
+ val: "[\\x00-\\x1f\"\\\\]",
+ chars: []rune{'"', '\\'},
+ ranges: []rune{'\x00', '\x1f'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "EscapeSequence",
+ pos: position{line: 100, col: 1, offset: 2340},
+ expr: &choiceExpr{
+ pos: position{line: 100, col: 18, offset: 2359},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 100, col: 18, offset: 2359},
+ name: "SingleCharEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 100, col: 37, offset: 2378},
+ name: "UnicodeEscape",
+ },
+ },
+ },
+ },
+ {
+ name: "SingleCharEscape",
+ pos: position{line: 102, col: 1, offset: 2393},
+ expr: &charClassMatcher{
+ pos: position{line: 102, col: 20, offset: 2414},
+ val: "[\"\\\\/bfnrt]",
+ chars: []rune{'"', '\\', '/', 'b', 'f', 'n', 'r', 't'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "UnicodeEscape",
+ pos: position{line: 104, col: 1, offset: 2427},
+ expr: &seqExpr{
+ pos: position{line: 104, col: 17, offset: 2445},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 104, col: 17, offset: 2445},
+ val: "u",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 21, offset: 2449},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 30, offset: 2458},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 39, offset: 2467},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 48, offset: 2476},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ {
+ name: "DecimalDigit",
+ pos: position{line: 106, col: 1, offset: 2486},
+ expr: &charClassMatcher{
+ pos: position{line: 106, col: 16, offset: 2503},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "NonZeroDecimalDigit",
+ pos: position{line: 108, col: 1, offset: 2510},
+ expr: &charClassMatcher{
+ pos: position{line: 108, col: 23, offset: 2534},
+ val: "[1-9]",
+ ranges: []rune{'1', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "HexDigit",
+ pos: position{line: 110, col: 1, offset: 2541},
+ expr: &charClassMatcher{
+ pos: position{line: 110, col: 12, offset: 2554},
+ val: "[0-9a-f]i",
+ ranges: []rune{'0', '9', 'a', 'f'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ {
+ name: "Bool",
+ pos: position{line: 112, col: 1, offset: 2565},
+ expr: &choiceExpr{
+ pos: position{line: 112, col: 8, offset: 2574},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 112, col: 8, offset: 2574},
+ run: (*parser).callonBool2,
+ expr: &litMatcher{
+ pos: position{line: 112, col: 8, offset: 2574},
+ val: "true",
+ ignoreCase: false,
+ },
+ },
+ &actionExpr{
+ pos: position{line: 112, col: 38, offset: 2604},
+ run: (*parser).callonBool4,
+ expr: &litMatcher{
+ pos: position{line: 112, col: 38, offset: 2604},
+ val: "false",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Null",
+ pos: position{line: 114, col: 1, offset: 2635},
+ expr: &actionExpr{
+ pos: position{line: 114, col: 8, offset: 2644},
+ run: (*parser).callonNull1,
+ expr: &litMatcher{
+ pos: position{line: 114, col: 8, offset: 2644},
+ val: "null",
+ ignoreCase: false,
+ },
+ },
+ },
+ {
+ name: "_",
+ displayName: "\"whitespace\"",
+ pos: position{line: 116, col: 1, offset: 2672},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 116, col: 18, offset: 2691},
+ expr: &charClassMatcher{
+ pos: position{line: 116, col: 18, offset: 2691},
+ val: "[ \\t\\r\\n]",
+ chars: []rune{' ', '\t', '\r', '\n'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 118, col: 1, offset: 2703},
+ expr: ¬Expr{
+ pos: position{line: 118, col: 7, offset: 2711},
+ expr: &anyMatcher{
+ line: 118, col: 8, offset: 2712,
+ },
+ },
+ },
+ },
+}
+
+// onJSON1 is the action for the JSON start rule. Zero parsed values yield
+// nil, a single value is unwrapped, and multiple values are returned as a
+// slice.
+func (c *current) onJSON1(vals interface{}) (interface{}, error) {
+ valsSl := toIfaceSlice(vals)
+ switch len(valsSl) {
+ case 0:
+ return nil, nil
+ case 1:
+ return valsSl[0], nil
+ default:
+ return valsSl, nil
+ }
+}
+
+// callonJSON1 adapts onJSON1 to the parser, reading the labeled "vals"
+// variable from the top of the variable stack.
+func (p *parser) callonJSON1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onJSON1(stack["vals"])
+}
+
+// onValue1 is the action for the Value rule: it passes the labeled value
+// through unchanged.
+func (c *current) onValue1(val interface{}) (interface{}, error) {
+ return val, nil
+}
+
+func (p *parser) callonValue1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onValue1(stack["val"])
+}
+
+// onObject1 builds a map from the Object rule's sequence values. The fixed
+// indices mirror the grammar's sequence: vals[0] is the first key, vals[4]
+// its value, and vals[5] the repeated ( ',' _ String _ ':' _ Value ) tail,
+// where each repetition stores the key at index 2 and the value at index 6.
+func (c *current) onObject1(vals interface{}) (interface{}, error) {
+ res := make(map[string]interface{})
+ valsSl := toIfaceSlice(vals)
+ if len(valsSl) == 0 {
+ return res, nil
+ }
+ res[valsSl[0].(string)] = valsSl[4]
+ restSl := toIfaceSlice(valsSl[5])
+ for _, v := range restSl {
+ vSl := toIfaceSlice(v)
+ res[vSl[2].(string)] = vSl[6]
+ }
+ return res, nil
+}
+
+func (p *parser) callonObject1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onObject1(stack["vals"])
+}
+
+// onArray1 builds a slice from the Array rule's sequence values: vals[0]
+// is the first element, vals[1] the repeated ( ',' _ Value ) tail with the
+// element at index 2 of each repetition.
+func (c *current) onArray1(vals interface{}) (interface{}, error) {
+ valsSl := toIfaceSlice(vals)
+ if len(valsSl) == 0 {
+ return []interface{}{}, nil
+ }
+ res := []interface{}{valsSl[0]}
+ restSl := toIfaceSlice(valsSl[1])
+ for _, v := range restSl {
+ vSl := toIfaceSlice(v)
+ res = append(res, vSl[2])
+ }
+ return res, nil
+}
+
+func (p *parser) callonArray1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onArray1(stack["vals"])
+}
+
+// onNumber1 converts the matched text to a float64.
+func (c *current) onNumber1() (interface{}, error) {
+ // JSON numbers have the same syntax as Go's, and are parseable using
+ // strconv.
+ return strconv.ParseFloat(string(c.text), 64)
+}
+
+func (p *parser) callonNumber1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onNumber1()
+}
+
+// onString1 unquotes the matched text (including the surrounding quotes)
+// using Go's string-literal rules.
+func (c *current) onString1() (interface{}, error) {
+ // TODO : the forward slash (solidus) is not a valid escape in Go, it will
+ // fail if there's one in the string
+ return strconv.Unquote(string(c.text))
+}
+
+func (p *parser) callonString1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onString1()
+}
+
+// onBool2 is the action for the "true" literal.
+func (c *current) onBool2() (interface{}, error) {
+ return true, nil
+}
+
+func (p *parser) callonBool2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onBool2()
+}
+
+// onBool4 is the action for the "false" literal.
+func (c *current) onBool4() (interface{}, error) {
+ return false, nil
+}
+
+func (p *parser) callonBool4() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onBool4()
+}
+
+// onNull1 is the action for the "null" literal.
+func (c *current) onNull1() (interface{}, error) {
+ return nil, nil
+}
+
+func (p *parser) callonNull1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onNull1()
+}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ // Returning an Option that restores the previous value allows
+ // callers to undo the setting.
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ // Note: the Close error is ignored; the file is only read.
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages. It runs the package-level generated grammar g.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+// String formats the position as "line:col [offset]".
+func (p position) String() string {
+ return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune // current rune at this position
+ w int // width in bytes of the current rune
+}
+
+// current captures the state made available to action code as "c".
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
+// the AST types...
+
+// grammar is the root of the parsed grammar: an ordered list of rules.
+type grammar struct {
+ pos position
+ rules []*rule
+}
+
+// rule is a named grammar rule with an optional display name used in
+// error messages.
+type rule struct {
+ pos position
+ name string
+ displayName string
+ expr interface{}
+}
+
+// choiceExpr is an ordered choice: alternatives are tried in order and
+// the first that matches wins.
+type choiceExpr struct {
+ pos position
+ alternatives []interface{}
+}
+
+// actionExpr is an expression with an associated Go action, run when the
+// expression matches.
+type actionExpr struct {
+ pos position
+ expr interface{}
+ run func(*parser) (interface{}, error)
+}
+
+// seqExpr is a sequence: every expression must match, in order.
+type seqExpr struct {
+ pos position
+ exprs []interface{}
+}
+
+// labeledExpr stores the matched value of expr under label for use by
+// action code.
+type labeledExpr struct {
+ pos position
+ label string
+ expr interface{}
+}
+
+// expr is the common shape shared by the single-operand operator types
+// declared below.
+type expr struct {
+ pos position
+ expr interface{}
+}
+
+type andExpr expr
+type notExpr expr
+type zeroOrOneExpr expr
+type zeroOrMoreExpr expr
+type oneOrMoreExpr expr
+
+// ruleRefExpr refers to another rule by name.
+type ruleRefExpr struct {
+ pos position
+ name string
+}
+
+// andCodeExpr is a predicate implemented in Go code; the match succeeds
+// when run returns true.
+type andCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+// notCodeExpr is a negative predicate implemented in Go code; the match
+// succeeds when run returns false.
+type notCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+// litMatcher matches the literal string val, optionally case-insensitively.
+type litMatcher struct {
+ pos position
+ val string
+ ignoreCase bool
+}
+
+// charClassMatcher matches a character class: explicit chars, inclusive
+// rune ranges (stored as pairs), and Unicode range tables; inverted flips
+// the result.
+type charClassMatcher struct {
+ pos position
+ val string
+ chars []rune
+ ranges []rune
+ classes []*unicode.RangeTable
+ ignoreCase bool
+ inverted bool
+}
+
+// anyMatcher matches any single rune (the "." expression).
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+// add appends an error to the list.
+func (e *errList) add(err error) {
+ *e = append(*e, err)
+}
+
+// err returns the list as a single error value, or nil when the list is
+// empty. Duplicate messages are removed first.
+func (e errList) err() error {
+ if len(e) == 0 {
+ return nil
+ }
+ e.dedupe()
+ return e
+}
+
+// dedupe removes errors with duplicate messages, preserving first-seen
+// order.
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+// Error implements the error interface, joining the messages with
+// newlines when there is more than one.
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+ Inner error
+ pos position
+ prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+ return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+func newParser(filename string, b []byte, opts ...Option) *parser {
+ p := &parser{
+ filename: filename,
+ errs: new(errList),
+ data: b,
+ pt: savepoint{position: position{line: 1}},
+ recover: true, // panics are converted to errors by default
+ }
+ p.setOptions(opts)
+ return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
+// resultTuple holds a memoized parse result: the value, whether it
+// matched, and the position reached.
+type resultTuple struct {
+ v interface{}
+ b bool
+ end savepoint
+}
+
+// parser holds all state for a single parse run.
+type parser struct {
+ filename string // used as prefix in error messages
+ pt savepoint // current position, rune and width
+ cur current // match state exposed to action code
+
+ data []byte // the input source
+ errs *errList // accumulated errors
+
+ recover bool // convert panics to errors when true
+ debug bool // print trace output when true
+ depth int // indentation depth for debug output
+
+ memoize bool
+ // memoization table for the packrat algorithm:
+ // map[offset in source] map[expression or rule] {value, match}
+ memo map[int]map[interface{}]resultTuple
+
+ // rules table, maps the rule identifier to the rule node
+ rules map[string]*rule
+ // variables stack, map of label to value
+ vstack []map[string]interface{}
+ // rule stack, allows identification of the current rule in errors
+ rstack []*rule
+
+ // stats
+ exprCnt int
+}
+
+// push a variable set on the vstack.
+func (p *parser) pushV() {
+ if cap(p.vstack) == len(p.vstack) {
+ // create new empty slot in the stack
+ p.vstack = append(p.vstack, nil)
+ } else {
+ // slice to 1 more
+ p.vstack = p.vstack[:len(p.vstack)+1]
+ }
+
+ // get the last args set
+ m := p.vstack[len(p.vstack)-1]
+ if m != nil && len(m) == 0 {
+ // empty map, all good: reuse it instead of allocating a new one
+ return
+ }
+
+ m = make(map[string]interface{})
+ p.vstack[len(p.vstack)-1] = m
+}
+
+// pop a variable set from the vstack.
+func (p *parser) popV() {
+ // if the map is not empty, clear it
+ m := p.vstack[len(p.vstack)-1]
+ if len(m) > 0 {
+ // GC that map
+ p.vstack[len(p.vstack)-1] = nil
+ }
+ p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+// print writes a debug trace line (position, message, current rune) when
+// the debug flag is set. It returns s unchanged so calls can be chained.
+func (p *parser) print(prefix, s string) string {
+ if !p.debug {
+ return s
+ }
+
+ fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
+ prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+ return s
+}
+
+// in increases the trace depth and prints an entry marker.
+func (p *parser) in(s string) string {
+ p.depth++
+ return p.print(strings.Repeat(" ", p.depth)+">", s)
+}
+
+// out decreases the trace depth and prints an exit marker.
+func (p *parser) out(s string) string {
+ p.depth--
+ return p.print(strings.Repeat(" ", p.depth)+"<", s)
+}
+
+// addErr records err at the current parser position.
+func (p *parser) addErr(err error) {
+ p.addErrAt(err, p.pt.position)
+}
+
+// addErrAt records err at pos, prefixed with "filename:line:col (offset)"
+// and, when available, the current rule's display name or name.
+func (p *parser) addErrAt(err error, pos position) {
+ var buf bytes.Buffer
+ if p.filename != "" {
+ buf.WriteString(p.filename)
+ }
+ if buf.Len() > 0 {
+ buf.WriteString(":")
+ }
+ buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
+ if len(p.rstack) > 0 {
+ if buf.Len() > 0 {
+ buf.WriteString(": ")
+ }
+ rule := p.rstack[len(p.rstack)-1]
+ if rule.displayName != "" {
+ buf.WriteString("rule " + rule.displayName)
+ } else {
+ buf.WriteString("rule " + rule.name)
+ }
+ }
+ pe := &parserError{Inner: err, prefix: buf.String()}
+ p.errs.add(pe)
+}
+
+// read advances the parser to the next rune. The offset is moved past the
+// previous rune's width before decoding, and line/col are maintained. At
+// end of input, utf8.DecodeRune returns RuneError with width 0, which the
+// parser uses as the EOF marker; RuneError with a non-zero width means the
+// input is not valid UTF-8.
+func (p *parser) read() {
+ p.pt.offset += p.pt.w
+ rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+ p.pt.rn = rn
+ p.pt.w = n
+ p.pt.col++
+ if rn == '\n' {
+ p.pt.line++
+ p.pt.col = 0
+ }
+
+ if rn == utf8.RuneError {
+ if n > 0 {
+ p.addErr(errInvalidEncoding)
+ }
+ }
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+ if p.debug {
+ defer p.out(p.in("restore"))
+ }
+ if pt.offset == p.pt.offset {
+ return
+ }
+ p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+ return p.data[start.position.offset:p.pt.position.offset]
+}
+
+// getMemoized looks up a cached result for node at the current offset.
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
+// setMemoized caches tuple for node at the savepoint's offset, lazily
+// allocating the memoization maps.
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+ if p.memo == nil {
+ p.memo = make(map[int]map[interface{}]resultTuple)
+ }
+ m := p.memo[pt.offset]
+ if m == nil {
+ m = make(map[interface{}]resultTuple)
+ p.memo[pt.offset] = m
+ }
+ m[node] = tuple
+}
+
+// buildRulesTable indexes the grammar's rules by name for ruleRefExpr
+// lookups.
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
+// parse runs the grammar against the parser's input. The first rule in
+// the grammar is the start rule. When the recover flag is set, panics
+// (including those raised deliberately in action code) are converted to
+// parse errors. The named results allow the deferred handler to set them.
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+ if len(g.rules) == 0 {
+ p.addErr(errNoRule)
+ return nil, p.errs.err()
+ }
+
+ // TODO : not super critical but this could be generated
+ p.buildRulesTable(g)
+
+ if p.recover {
+ // panic can be used in action code to stop parsing immediately
+ // and return the panic as an error.
+ defer func() {
+ if e := recover(); e != nil {
+ if p.debug {
+ defer p.out(p.in("panic handler"))
+ }
+ val = nil
+ switch e := e.(type) {
+ case error:
+ p.addErr(e)
+ default:
+ p.addErr(fmt.Errorf("%v", e))
+ }
+ err = p.errs.err()
+ }
+ }()
+ }
+
+ // start rule is rule [0]
+ p.read() // advance to first rune
+ val, ok := p.parseRule(g.rules[0])
+ if !ok {
+ if len(*p.errs) == 0 {
+ // make sure this doesn't go out silently
+ p.addErr(errNoMatch)
+ }
+ return nil, p.errs.err()
+ }
+ return val, p.errs.err()
+}
+
+// parseRule parses a single rule, maintaining the rule stack (for error
+// messages) and, when memoization is enabled, caching the result keyed by
+// the rule and the start position.
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRule " + rule.name))
+ }
+
+ if p.memoize {
+ res, ok := p.getMemoized(rule)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ }
+
+ start := p.pt
+ p.rstack = append(p.rstack, rule)
+ p.pushV()
+ val, ok := p.parseExpr(rule.expr)
+ p.popV()
+ p.rstack = p.rstack[:len(p.rstack)-1]
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+
+ if p.memoize {
+ p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+// parseExpr dispatches on the concrete expression type, with optional
+// memoization keyed by the expression node and the current position.
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+ var pt savepoint
+ var ok bool
+
+ if p.memoize {
+ res, ok := p.getMemoized(expr)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ pt = p.pt
+ }
+
+ p.exprCnt++
+ var val interface{}
+ switch expr := expr.(type) {
+ case *actionExpr:
+ val, ok = p.parseActionExpr(expr)
+ case *andCodeExpr:
+ val, ok = p.parseAndCodeExpr(expr)
+ case *andExpr:
+ val, ok = p.parseAndExpr(expr)
+ case *anyMatcher:
+ val, ok = p.parseAnyMatcher(expr)
+ case *charClassMatcher:
+ val, ok = p.parseCharClassMatcher(expr)
+ case *choiceExpr:
+ val, ok = p.parseChoiceExpr(expr)
+ case *labeledExpr:
+ val, ok = p.parseLabeledExpr(expr)
+ case *litMatcher:
+ val, ok = p.parseLitMatcher(expr)
+ case *notCodeExpr:
+ val, ok = p.parseNotCodeExpr(expr)
+ case *notExpr:
+ val, ok = p.parseNotExpr(expr)
+ case *oneOrMoreExpr:
+ val, ok = p.parseOneOrMoreExpr(expr)
+ case *ruleRefExpr:
+ val, ok = p.parseRuleRefExpr(expr)
+ case *seqExpr:
+ val, ok = p.parseSeqExpr(expr)
+ case *zeroOrMoreExpr:
+ val, ok = p.parseZeroOrMoreExpr(expr)
+ case *zeroOrOneExpr:
+ val, ok = p.parseZeroOrOneExpr(expr)
+ default:
+ panic(fmt.Sprintf("unknown expression type %T", expr))
+ }
+ if p.memoize {
+ p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+// parseActionExpr parses the inner expression and, on a match, runs the
+// action with p.cur set to the match's position and text. An error from
+// the action is recorded but does not fail the match; the action's value
+// replaces the expression's value either way.
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseActionExpr"))
+ }
+
+ start := p.pt
+ val, ok := p.parseExpr(act.expr)
+ if ok {
+ p.cur.pos = start.position
+ p.cur.text = p.sliceFrom(start)
+ actVal, err := act.run(p)
+ if err != nil {
+ p.addErrAt(err, start.position)
+ }
+ val = actVal
+ }
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+ return val, ok
+}
+
+// parseAndCodeExpr evaluates a Go-code predicate; the match succeeds when
+// the predicate returns true. No input is consumed.
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndCodeExpr"))
+ }
+
+ ok, err := and.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, ok
+}
+
+// parseAndExpr is positive lookahead: the inner expression is tried and
+// the position is always restored, so no input is consumed.
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(and.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, ok
+}
+
+// parseAnyMatcher matches any single rune; it fails only at EOF (where
+// the current rune is RuneError).
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+// parseCharClassMatcher matches the current rune against explicit chars,
+// then rune ranges (stored as inclusive lo/hi pairs), then Unicode range
+// tables. The inverted flag flips the outcome: a hit fails and a complete
+// miss consumes the rune and succeeds.
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseCharClassMatcher"))
+ }
+
+ cur := p.pt.rn
+ // can't match EOF
+ if cur == utf8.RuneError {
+ return nil, false
+ }
+ start := p.pt
+ if chr.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+
+ // try to match in the list of available chars
+ for _, rn := range chr.chars {
+ if rn == cur {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of ranges
+ for i := 0; i < len(chr.ranges); i += 2 {
+ if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ // try to match in the list of Unicode classes
+ for _, cl := range chr.classes {
+ if unicode.Is(cl, cur) {
+ if chr.inverted {
+ return nil, false
+ }
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ }
+
+ if chr.inverted {
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+// parseChoiceExpr tries each alternative in order and returns the first
+// match (ordered choice, PEG semantics).
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
+// parseLabeledExpr parses the inner expression and, on a match, stores the
+// value under the label in the current variable set for action code.
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLabeledExpr"))
+ }
+
+ p.pushV()
+ val, ok := p.parseExpr(lab.expr)
+ p.popV()
+ if ok && lab.label != "" {
+ m := p.vstack[len(p.vstack)-1]
+ m[lab.label] = val
+ }
+ return val, ok
+}
+
+// parseLitMatcher matches the literal rune by rune, restoring the start
+// position on any mismatch. With ignoreCase set, the input rune is
+// lowercased before comparison.
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLitMatcher"))
+ }
+
+ start := p.pt
+ for _, want := range lit.val {
+ cur := p.pt.rn
+ if lit.ignoreCase {
+ cur = unicode.ToLower(cur)
+ }
+ if cur != want {
+ p.restore(start)
+ return nil, false
+ }
+ p.read()
+ }
+ return p.sliceFrom(start), true
+}
+
+// parseNotCodeExpr evaluates a Go-code predicate and negates its result;
+// no input is consumed.
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotCodeExpr"))
+ }
+
+ ok, err := not.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, !ok
+}
+
+// parseNotExpr is negative lookahead: the inner expression is tried, the
+// position restored, and the result negated — no input is consumed.
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(not.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, !ok
+}
+
+// parseOneOrMoreExpr repeats the inner expression; it fails only when the
+// very first attempt does not match.
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseOneOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ if len(vals) == 0 {
+ // did not match once, no match
+ return nil, false
+ }
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+// parseRuleRefExpr resolves the referenced rule by name and parses it;
+// an undefined rule is recorded as an error and fails the match.
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRuleRefExpr " + ref.name))
+ }
+
+ if ref.name == "" {
+ panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+ }
+
+ rule := p.rules[ref.name]
+ if rule == nil {
+ p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+ return nil, false
+ }
+ return p.parseRule(rule)
+}
+
+// parseSeqExpr parses each expression in order, restoring the start
+// position (backtracking) if any of them fails.
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseSeqExpr"))
+ }
+
+ var vals []interface{}
+
+ pt := p.pt
+ for _, expr := range seq.exprs {
+ val, ok := p.parseExpr(expr)
+ if !ok {
+ p.restore(pt)
+ return nil, false
+ }
+ vals = append(vals, val)
+ }
+ return vals, true
+}
+
+// parseZeroOrMoreExpr repeats the inner expression until it fails; it
+// always succeeds, possibly with an empty (nil) slice of values.
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+// parseZeroOrOneExpr tries the inner expression once and always succeeds;
+// val is nil when the inner expression did not match.
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+// rangeTable resolves a Unicode class name against the standard category,
+// property and script tables. Unknown names panic: the generator only
+// emits names it has validated.
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.peg b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.peg
new file mode 100644
index 0000000000..ecb80f49fc
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json.peg
@@ -0,0 +1,118 @@
+{
+// Command json parses JSON as defined by [1].
+//
+// BUGS: the escaped forward solidus (`\/`) is not currently handled.
+//
+// [1]: http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf
+package main
+
+func main() {
+ in := os.Stdin
+ nm := "stdin"
+ if len(os.Args) > 1 {
+ f, err := os.Open(os.Args[1])
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ in = f
+ nm = os.Args[1]
+ }
+
+ got, err := ParseReader(nm, in)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(got)
+}
+
+// toIfaceSlice converts a parse result to a []interface{}, mapping a nil
+// result to a nil slice.
+func toIfaceSlice(v interface{}) []interface{} {
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{})
+}
+}
+
+// JSON is the start rule: optional whitespace, one or more values, EOF.
+// A single value is unwrapped; multiple values are returned as a slice.
+JSON ← _ vals:Value+ EOF {
+ valsSl := toIfaceSlice(vals)
+ switch len(valsSl) {
+ case 0:
+ return nil, nil
+ case 1:
+ return valsSl[0], nil
+ default:
+ return valsSl, nil
+ }
+}
+
+Value ← val:( Object / Array / Number / String / Bool / Null ) _ {
+ return val, nil
+}
+
+// The fixed indices in the action mirror the optional sequence below:
+// vals[0] is the first key, vals[4] its value, vals[5] the repeated tail.
+Object ← '{' _ vals:( String _ ':' _ Value ( ',' _ String _ ':' _ Value )* )? '}' {
+ res := make(map[string]interface{})
+ valsSl := toIfaceSlice(vals)
+ if len(valsSl) == 0 {
+ return res, nil
+ }
+ res[valsSl[0].(string)] = valsSl[4]
+ restSl := toIfaceSlice(valsSl[5])
+ for _, v := range restSl {
+ vSl := toIfaceSlice(v)
+ res[vSl[2].(string)] = vSl[6]
+ }
+ return res, nil
+}
+
+Array ← '[' _ vals:( Value ( ',' _ Value )* )? ']' {
+ valsSl := toIfaceSlice(vals)
+ if len(valsSl) == 0 {
+ return []interface{}{}, nil
+ }
+ res := []interface{}{valsSl[0]}
+ restSl := toIfaceSlice(valsSl[1])
+ for _, v := range restSl {
+ vSl := toIfaceSlice(v)
+ res = append(res, vSl[2])
+ }
+ return res, nil
+}
+
+Number ← '-'? Integer ( '.' DecimalDigit+ )? Exponent? {
+ // JSON numbers have the same syntax as Go's, and are parseable using
+ // strconv.
+ return strconv.ParseFloat(string(c.text), 64)
+}
+
+Integer ← '0' / NonZeroDecimalDigit DecimalDigit*
+
+Exponent ← 'e'i [+-]? DecimalDigit+
+
+// The whole quoted text (quotes included) is passed to strconv.Unquote.
+String ← '"' ( !EscapedChar . / '\\' EscapeSequence )* '"' {
+ // TODO : the forward slash (solidus) is not a valid escape in Go, it will
+ // fail if there's one in the string
+ return strconv.Unquote(string(c.text))
+}
+
+EscapedChar ← [\x00-\x1f"\\]
+
+EscapeSequence ← SingleCharEscape / UnicodeEscape
+
+SingleCharEscape ← ["\\/bfnrt]
+
+UnicodeEscape ← 'u' HexDigit HexDigit HexDigit HexDigit
+
+DecimalDigit ← [0-9]
+
+NonZeroDecimalDigit ← [1-9]
+
+HexDigit ← [0-9a-f]i
+
+Bool ← "true" { return true, nil } / "false" { return false, nil }
+
+Null ← "null" { return nil, nil }
+
+_ "whitespace" ← [ \t\r\n]*
+
+EOF ← !.
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/json/json_test.go b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json_test.go
new file mode 100644
index 0000000000..f7bd662f15
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/json/json_test.go
@@ -0,0 +1,95 @@
+package main
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+)
+
+// TestCmpStdlib parses every testdata/*.json file with the generated
+// pigeon parser and checks that the result is deeply equal to what
+// encoding/json produces for the same input.
+func TestCmpStdlib(t *testing.T) {
+ files := testJSONFiles(t)
+ for _, file := range files {
+ pgot, err := ParseFile(file)
+ if err != nil {
+ t.Errorf("%s: pigeon.ParseFile: %v", file, err)
+ continue
+ }
+
+ b, err := ioutil.ReadFile(file)
+ if err != nil {
+ // Fixed: the message previously said "ioutil.ReadAll" but the
+ // call above is ioutil.ReadFile.
+ t.Errorf("%s: ioutil.ReadFile: %v", file, err)
+ continue
+ }
+ var jgot interface{}
+ if err := json.Unmarshal(b, &jgot); err != nil {
+ t.Errorf("%s: json.Unmarshal: %v", file, err)
+ continue
+ }
+
+ if !reflect.DeepEqual(pgot, jgot) {
+ t.Errorf("%s: not equal", file)
+ continue
+ }
+ }
+}
+
+// testJSONFiles returns the paths of all *.json files under testdata,
+// failing the test if the directory cannot be read.
+func testJSONFiles(t *testing.T) []string {
+ const rootDir = "testdata"
+
+ fis, err := ioutil.ReadDir(rootDir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ files := make([]string, 0, len(fis))
+ for _, fi := range fis {
+ if filepath.Ext(fi.Name()) == ".json" {
+ files = append(files, filepath.Join(rootDir, fi.Name()))
+ }
+ }
+ return files
+}
+
+// BenchmarkPigeonJSONNoMemo benchmarks the generated parser with
+// memoization disabled.
+func BenchmarkPigeonJSONNoMemo(b *testing.B) {
+ d, err := ioutil.ReadFile("testdata/github-octokit-repos.json")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(false)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkPigeonJSONMemo benchmarks the generated parser with
+// memoization (packrat) enabled.
+func BenchmarkPigeonJSONMemo(b *testing.B) {
+ d, err := ioutil.ReadFile("testdata/github-octokit-repos.json")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ if _, err := Parse("", d, Memoize(true)); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+// BenchmarkStdlibJSON benchmarks encoding/json on the same input as a
+// baseline for the pigeon benchmarks above.
+func BenchmarkStdlibJSON(b *testing.B) {
+ d, err := ioutil.ReadFile("testdata/github-octokit-repos.json")
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ var iface interface{}
+ if err := json.Unmarshal(d, &iface); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octocat-status1.json b/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octocat-status1.json
new file mode 100644
index 0000000000..41b42e677b
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octocat-status1.json
@@ -0,0 +1,3 @@
+[
+
+]
diff --git a/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octokit-repos.json b/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octokit-repos.json
new file mode 100644
index 0000000000..0a5916fa4b
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/examples/json/testdata/github-octokit-repos.json
@@ -0,0 +1,370 @@
+[
+ {
+ "id": 417862,
+ "name": "octokit.rb",
+ "full_name": "octokit/octokit.rb",
+ "owner": {
+ "login": "octokit",
+ "id": 3430433,
+ "avatar_url": "https://avatars.githubusercontent.com/u/3430433?v=3",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/octokit",
+ "html_url": "https://github.com/octokit",
+ "followers_url": "https://api.github.com/users/octokit/followers",
+ "following_url": "https://api.github.com/users/octokit/following{/other_user}",
+ "gists_url": "https://api.github.com/users/octokit/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/octokit/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/octokit/subscriptions",
+ "organizations_url": "https://api.github.com/users/octokit/orgs",
+ "repos_url": "https://api.github.com/users/octokit/repos",
+ "events_url": "https://api.github.com/users/octokit/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/octokit/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/octokit/octokit.rb",
+ "description": "Ruby toolkit for the GitHub API",
+ "fork": false,
+ "url": "https://api.github.com/repos/octokit/octokit.rb",
+ "forks_url": "https://api.github.com/repos/octokit/octokit.rb/forks",
+ "keys_url": "https://api.github.com/repos/octokit/octokit.rb/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/octokit/octokit.rb/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/octokit/octokit.rb/teams",
+ "hooks_url": "https://api.github.com/repos/octokit/octokit.rb/hooks",
+ "issue_events_url": "https://api.github.com/repos/octokit/octokit.rb/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/octokit/octokit.rb/events",
+ "assignees_url": "https://api.github.com/repos/octokit/octokit.rb/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/octokit/octokit.rb/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/octokit/octokit.rb/tags",
+ "blobs_url": "https://api.github.com/repos/octokit/octokit.rb/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/octokit/octokit.rb/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/octokit/octokit.rb/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/octokit/octokit.rb/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/octokit/octokit.rb/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/octokit/octokit.rb/languages",
+ "stargazers_url": "https://api.github.com/repos/octokit/octokit.rb/stargazers",
+ "contributors_url": "https://api.github.com/repos/octokit/octokit.rb/contributors",
+ "subscribers_url": "https://api.github.com/repos/octokit/octokit.rb/subscribers",
+ "subscription_url": "https://api.github.com/repos/octokit/octokit.rb/subscription",
+ "commits_url": "https://api.github.com/repos/octokit/octokit.rb/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/octokit/octokit.rb/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/octokit/octokit.rb/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/octokit/octokit.rb/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/octokit/octokit.rb/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/octokit/octokit.rb/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/octokit/octokit.rb/merges",
+ "archive_url": "https://api.github.com/repos/octokit/octokit.rb/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/octokit/octokit.rb/downloads",
+ "issues_url": "https://api.github.com/repos/octokit/octokit.rb/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/octokit/octokit.rb/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/octokit/octokit.rb/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/octokit/octokit.rb/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/octokit/octokit.rb/labels{/name}",
+ "releases_url": "https://api.github.com/repos/octokit/octokit.rb/releases{/id}",
+ "created_at": "2009-12-10T21:41:49Z",
+ "updated_at": "2015-04-02T15:26:33Z",
+ "pushed_at": "2015-03-25T01:12:36Z",
+ "git_url": "git://github.com/octokit/octokit.rb.git",
+ "ssh_url": "git@github.com:octokit/octokit.rb.git",
+ "clone_url": "https://github.com/octokit/octokit.rb.git",
+ "svn_url": "https://github.com/octokit/octokit.rb",
+ "homepage": "http://octokit.github.io/octokit.rb/",
+ "size": 16088,
+ "stargazers_count": 1845,
+ "watchers_count": 1845,
+ "language": "Ruby",
+ "has_issues": true,
+ "has_downloads": true,
+ "has_wiki": false,
+ "has_pages": true,
+ "forks_count": 401,
+ "mirror_url": null,
+ "open_issues_count": 5,
+ "forks": 401,
+ "open_issues": 5,
+ "watchers": 1845,
+ "default_branch": "master",
+ "permissions": {
+ "admin": false,
+ "push": false,
+ "pull": true
+ }
+ },
+ {
+ "id": 7528679,
+ "name": "octokit.net",
+ "full_name": "octokit/octokit.net",
+ "owner": {
+ "login": "octokit",
+ "id": 3430433,
+ "avatar_url": "https://avatars.githubusercontent.com/u/3430433?v=3",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/octokit",
+ "html_url": "https://github.com/octokit",
+ "followers_url": "https://api.github.com/users/octokit/followers",
+ "following_url": "https://api.github.com/users/octokit/following{/other_user}",
+ "gists_url": "https://api.github.com/users/octokit/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/octokit/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/octokit/subscriptions",
+ "organizations_url": "https://api.github.com/users/octokit/orgs",
+ "repos_url": "https://api.github.com/users/octokit/repos",
+ "events_url": "https://api.github.com/users/octokit/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/octokit/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/octokit/octokit.net",
+ "description": "A GitHub API client library for .NET ",
+ "fork": false,
+ "url": "https://api.github.com/repos/octokit/octokit.net",
+ "forks_url": "https://api.github.com/repos/octokit/octokit.net/forks",
+ "keys_url": "https://api.github.com/repos/octokit/octokit.net/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/octokit/octokit.net/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/octokit/octokit.net/teams",
+ "hooks_url": "https://api.github.com/repos/octokit/octokit.net/hooks",
+ "issue_events_url": "https://api.github.com/repos/octokit/octokit.net/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/octokit/octokit.net/events",
+ "assignees_url": "https://api.github.com/repos/octokit/octokit.net/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/octokit/octokit.net/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/octokit/octokit.net/tags",
+ "blobs_url": "https://api.github.com/repos/octokit/octokit.net/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/octokit/octokit.net/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/octokit/octokit.net/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/octokit/octokit.net/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/octokit/octokit.net/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/octokit/octokit.net/languages",
+ "stargazers_url": "https://api.github.com/repos/octokit/octokit.net/stargazers",
+ "contributors_url": "https://api.github.com/repos/octokit/octokit.net/contributors",
+ "subscribers_url": "https://api.github.com/repos/octokit/octokit.net/subscribers",
+ "subscription_url": "https://api.github.com/repos/octokit/octokit.net/subscription",
+ "commits_url": "https://api.github.com/repos/octokit/octokit.net/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/octokit/octokit.net/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/octokit/octokit.net/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/octokit/octokit.net/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/octokit/octokit.net/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/octokit/octokit.net/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/octokit/octokit.net/merges",
+ "archive_url": "https://api.github.com/repos/octokit/octokit.net/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/octokit/octokit.net/downloads",
+ "issues_url": "https://api.github.com/repos/octokit/octokit.net/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/octokit/octokit.net/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/octokit/octokit.net/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/octokit/octokit.net/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/octokit/octokit.net/labels{/name}",
+ "releases_url": "https://api.github.com/repos/octokit/octokit.net/releases{/id}",
+ "created_at": "2013-01-09T20:48:45Z",
+ "updated_at": "2015-04-02T18:10:11Z",
+ "pushed_at": "2015-04-03T11:47:52Z",
+ "git_url": "git://github.com/octokit/octokit.net.git",
+ "ssh_url": "git@github.com:octokit/octokit.net.git",
+ "clone_url": "https://github.com/octokit/octokit.net.git",
+ "svn_url": "https://github.com/octokit/octokit.net",
+ "homepage": null,
+ "size": 70529,
+ "stargazers_count": 637,
+ "watchers_count": 637,
+ "language": "C#",
+ "has_issues": true,
+ "has_downloads": true,
+ "has_wiki": false,
+ "has_pages": false,
+ "forks_count": 270,
+ "mirror_url": null,
+ "open_issues_count": 63,
+ "forks": 270,
+ "open_issues": 63,
+ "watchers": 637,
+ "default_branch": "master",
+ "permissions": {
+ "admin": false,
+ "push": false,
+ "pull": true
+ }
+ },
+ {
+ "id": 7530454,
+ "name": "octokit.objc",
+ "full_name": "octokit/octokit.objc",
+ "owner": {
+ "login": "octokit",
+ "id": 3430433,
+ "avatar_url": "https://avatars.githubusercontent.com/u/3430433?v=3",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/octokit",
+ "html_url": "https://github.com/octokit",
+ "followers_url": "https://api.github.com/users/octokit/followers",
+ "following_url": "https://api.github.com/users/octokit/following{/other_user}",
+ "gists_url": "https://api.github.com/users/octokit/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/octokit/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/octokit/subscriptions",
+ "organizations_url": "https://api.github.com/users/octokit/orgs",
+ "repos_url": "https://api.github.com/users/octokit/repos",
+ "events_url": "https://api.github.com/users/octokit/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/octokit/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/octokit/octokit.objc",
+ "description": "GitHub API client for Objective-C",
+ "fork": false,
+ "url": "https://api.github.com/repos/octokit/octokit.objc",
+ "forks_url": "https://api.github.com/repos/octokit/octokit.objc/forks",
+ "keys_url": "https://api.github.com/repos/octokit/octokit.objc/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/octokit/octokit.objc/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/octokit/octokit.objc/teams",
+ "hooks_url": "https://api.github.com/repos/octokit/octokit.objc/hooks",
+ "issue_events_url": "https://api.github.com/repos/octokit/octokit.objc/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/octokit/octokit.objc/events",
+ "assignees_url": "https://api.github.com/repos/octokit/octokit.objc/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/octokit/octokit.objc/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/octokit/octokit.objc/tags",
+ "blobs_url": "https://api.github.com/repos/octokit/octokit.objc/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/octokit/octokit.objc/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/octokit/octokit.objc/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/octokit/octokit.objc/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/octokit/octokit.objc/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/octokit/octokit.objc/languages",
+ "stargazers_url": "https://api.github.com/repos/octokit/octokit.objc/stargazers",
+ "contributors_url": "https://api.github.com/repos/octokit/octokit.objc/contributors",
+ "subscribers_url": "https://api.github.com/repos/octokit/octokit.objc/subscribers",
+ "subscription_url": "https://api.github.com/repos/octokit/octokit.objc/subscription",
+ "commits_url": "https://api.github.com/repos/octokit/octokit.objc/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/octokit/octokit.objc/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/octokit/octokit.objc/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/octokit/octokit.objc/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/octokit/octokit.objc/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/octokit/octokit.objc/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/octokit/octokit.objc/merges",
+ "archive_url": "https://api.github.com/repos/octokit/octokit.objc/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/octokit/octokit.objc/downloads",
+ "issues_url": "https://api.github.com/repos/octokit/octokit.objc/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/octokit/octokit.objc/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/octokit/octokit.objc/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/octokit/octokit.objc/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/octokit/octokit.objc/labels{/name}",
+ "releases_url": "https://api.github.com/repos/octokit/octokit.objc/releases{/id}",
+ "created_at": "2013-01-09T22:42:53Z",
+ "updated_at": "2015-04-03T06:16:41Z",
+ "pushed_at": "2015-03-21T17:10:20Z",
+ "git_url": "git://github.com/octokit/octokit.objc.git",
+ "ssh_url": "git@github.com:octokit/octokit.objc.git",
+ "clone_url": "https://github.com/octokit/octokit.objc.git",
+ "svn_url": "https://github.com/octokit/octokit.objc",
+ "homepage": "",
+ "size": 3779,
+ "stargazers_count": 1131,
+ "watchers_count": 1131,
+ "language": "Objective-C",
+ "has_issues": true,
+ "has_downloads": true,
+ "has_wiki": true,
+ "has_pages": false,
+ "forks_count": 170,
+ "mirror_url": null,
+ "open_issues_count": 26,
+ "forks": 170,
+ "open_issues": 26,
+ "watchers": 1131,
+ "default_branch": "master",
+ "permissions": {
+ "admin": false,
+ "push": false,
+ "pull": true
+ }
+ },
+ {
+ "id": 10575811,
+ "name": "go-octokit",
+ "full_name": "octokit/go-octokit",
+ "owner": {
+ "login": "octokit",
+ "id": 3430433,
+ "avatar_url": "https://avatars.githubusercontent.com/u/3430433?v=3",
+ "gravatar_id": "",
+ "url": "https://api.github.com/users/octokit",
+ "html_url": "https://github.com/octokit",
+ "followers_url": "https://api.github.com/users/octokit/followers",
+ "following_url": "https://api.github.com/users/octokit/following{/other_user}",
+ "gists_url": "https://api.github.com/users/octokit/gists{/gist_id}",
+ "starred_url": "https://api.github.com/users/octokit/starred{/owner}{/repo}",
+ "subscriptions_url": "https://api.github.com/users/octokit/subscriptions",
+ "organizations_url": "https://api.github.com/users/octokit/orgs",
+ "repos_url": "https://api.github.com/users/octokit/repos",
+ "events_url": "https://api.github.com/users/octokit/events{/privacy}",
+ "received_events_url": "https://api.github.com/users/octokit/received_events",
+ "type": "Organization",
+ "site_admin": false
+ },
+ "private": false,
+ "html_url": "https://github.com/octokit/go-octokit",
+ "description": "Simple Go wrapper for the GitHub API",
+ "fork": false,
+ "url": "https://api.github.com/repos/octokit/go-octokit",
+ "forks_url": "https://api.github.com/repos/octokit/go-octokit/forks",
+ "keys_url": "https://api.github.com/repos/octokit/go-octokit/keys{/key_id}",
+ "collaborators_url": "https://api.github.com/repos/octokit/go-octokit/collaborators{/collaborator}",
+ "teams_url": "https://api.github.com/repos/octokit/go-octokit/teams",
+ "hooks_url": "https://api.github.com/repos/octokit/go-octokit/hooks",
+ "issue_events_url": "https://api.github.com/repos/octokit/go-octokit/issues/events{/number}",
+ "events_url": "https://api.github.com/repos/octokit/go-octokit/events",
+ "assignees_url": "https://api.github.com/repos/octokit/go-octokit/assignees{/user}",
+ "branches_url": "https://api.github.com/repos/octokit/go-octokit/branches{/branch}",
+ "tags_url": "https://api.github.com/repos/octokit/go-octokit/tags",
+ "blobs_url": "https://api.github.com/repos/octokit/go-octokit/git/blobs{/sha}",
+ "git_tags_url": "https://api.github.com/repos/octokit/go-octokit/git/tags{/sha}",
+ "git_refs_url": "https://api.github.com/repos/octokit/go-octokit/git/refs{/sha}",
+ "trees_url": "https://api.github.com/repos/octokit/go-octokit/git/trees{/sha}",
+ "statuses_url": "https://api.github.com/repos/octokit/go-octokit/statuses/{sha}",
+ "languages_url": "https://api.github.com/repos/octokit/go-octokit/languages",
+ "stargazers_url": "https://api.github.com/repos/octokit/go-octokit/stargazers",
+ "contributors_url": "https://api.github.com/repos/octokit/go-octokit/contributors",
+ "subscribers_url": "https://api.github.com/repos/octokit/go-octokit/subscribers",
+ "subscription_url": "https://api.github.com/repos/octokit/go-octokit/subscription",
+ "commits_url": "https://api.github.com/repos/octokit/go-octokit/commits{/sha}",
+ "git_commits_url": "https://api.github.com/repos/octokit/go-octokit/git/commits{/sha}",
+ "comments_url": "https://api.github.com/repos/octokit/go-octokit/comments{/number}",
+ "issue_comment_url": "https://api.github.com/repos/octokit/go-octokit/issues/comments{/number}",
+ "contents_url": "https://api.github.com/repos/octokit/go-octokit/contents/{+path}",
+ "compare_url": "https://api.github.com/repos/octokit/go-octokit/compare/{base}...{head}",
+ "merges_url": "https://api.github.com/repos/octokit/go-octokit/merges",
+ "archive_url": "https://api.github.com/repos/octokit/go-octokit/{archive_format}{/ref}",
+ "downloads_url": "https://api.github.com/repos/octokit/go-octokit/downloads",
+ "issues_url": "https://api.github.com/repos/octokit/go-octokit/issues{/number}",
+ "pulls_url": "https://api.github.com/repos/octokit/go-octokit/pulls{/number}",
+ "milestones_url": "https://api.github.com/repos/octokit/go-octokit/milestones{/number}",
+ "notifications_url": "https://api.github.com/repos/octokit/go-octokit/notifications{?since,all,participating}",
+ "labels_url": "https://api.github.com/repos/octokit/go-octokit/labels{/name}",
+ "releases_url": "https://api.github.com/repos/octokit/go-octokit/releases{/id}",
+ "created_at": "2013-06-08T23:50:29Z",
+ "updated_at": "2015-04-02T18:47:34Z",
+ "pushed_at": "2015-04-02T18:48:16Z",
+ "git_url": "git://github.com/octokit/go-octokit.git",
+ "ssh_url": "git@github.com:octokit/go-octokit.git",
+ "clone_url": "https://github.com/octokit/go-octokit.git",
+ "svn_url": "https://github.com/octokit/go-octokit",
+ "homepage": "https://github.com/octokit/go-octokit",
+ "size": 3693,
+ "stargazers_count": 106,
+ "watchers_count": 106,
+ "language": "Go",
+ "has_issues": true,
+ "has_downloads": true,
+ "has_wiki": false,
+ "has_pages": false,
+ "forks_count": 29,
+ "mirror_url": null,
+ "open_issues_count": 16,
+ "forks": 29,
+ "open_issues": 16,
+ "watchers": 106,
+ "default_branch": "master",
+ "permissions": {
+ "admin": false,
+ "push": false,
+ "pull": true
+ }
+ }
+]
diff --git a/vendor/github.com/PuerkitoBio/pigeon/grammar/bootstrap.peg b/vendor/github.com/PuerkitoBio/pigeon/grammar/bootstrap.peg
new file mode 100644
index 0000000000..f80200421c
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/grammar/bootstrap.peg
@@ -0,0 +1,238 @@
+{
+package main
+}
+
+Grammar ← __ initializer:( Initializer __ )? rules:( Rule __ )+ {
+ pos := c.astPos()
+
+ // create the grammar, assign its initializer
+ g := ast.NewGrammar(pos)
+ initSlice := toIfaceSlice(initializer)
+ if len(initSlice) > 0 {
+ g.Init = initSlice[0].(*ast.CodeBlock)
+ }
+
+ rulesSlice := toIfaceSlice(rules)
+ g.Rules = make([]*ast.Rule, len(rulesSlice))
+ for i, duo := range rulesSlice {
+ g.Rules[i] = duo.([]interface{})[0].(*ast.Rule)
+ }
+
+ return g, nil
+}
+
+Initializer ← code:CodeBlock EOS {
+ return code, nil
+}
+
+Rule ← name:IdentifierName __ display:( StringLiteral __ )? RuleDefOp __ expr:Expression EOS {
+ pos := c.astPos()
+
+ rule := ast.NewRule(pos, name.(*ast.Identifier))
+ displaySlice := toIfaceSlice(display)
+ if len(displaySlice) > 0 {
+ rule.DisplayName = displaySlice[0].(*ast.StringLit)
+ }
+ rule.Expr = expr.(ast.Expression)
+
+ return rule, nil
+}
+
+Expression ← ChoiceExpr
+
+ChoiceExpr ← first:ActionExpr rest:( __ "/" __ ActionExpr )* {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+
+ pos := c.astPos()
+ choice := ast.NewChoiceExpr(pos)
+ choice.Alternatives = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ choice.Alternatives = append(choice.Alternatives, sl.([]interface{})[3].(ast.Expression))
+ }
+ return choice, nil
+}
+
+ActionExpr ← expr:SeqExpr code:( __ CodeBlock )? {
+ if code == nil {
+ return expr, nil
+ }
+
+ pos := c.astPos()
+ act := ast.NewActionExpr(pos)
+ act.Expr = expr.(ast.Expression)
+ codeSlice := toIfaceSlice(code)
+ act.Code = codeSlice[1].(*ast.CodeBlock)
+
+ return act, nil
+}
+
+SeqExpr ← first:LabeledExpr rest:( __ LabeledExpr )* {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+ seq := ast.NewSeqExpr(c.astPos())
+ seq.Exprs = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ seq.Exprs = append(seq.Exprs, sl.([]interface{})[1].(ast.Expression))
+ }
+ return seq, nil
+}
+
+LabeledExpr ← label:Identifier __ ':' __ expr:PrefixedExpr {
+ pos := c.astPos()
+ lab := ast.NewLabeledExpr(pos)
+ lab.Label = label.(*ast.Identifier)
+ lab.Expr = expr.(ast.Expression)
+ return lab, nil
+} / PrefixedExpr
+
+PrefixedExpr ← op:PrefixedOp __ expr:SuffixedExpr {
+ pos := c.astPos()
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndExpr(pos)
+ and.Expr = expr.(ast.Expression)
+ return and, nil
+ }
+ not := ast.NewNotExpr(pos)
+ not.Expr = expr.(ast.Expression)
+ return not, nil
+} / SuffixedExpr
+
+PrefixedOp ← ( '&' / '!' ) {
+ return string(c.text), nil
+}
+
+SuffixedExpr ← expr:PrimaryExpr __ op:SuffixedOp {
+ pos := c.astPos()
+ opStr := op.(string)
+ switch opStr {
+ case "?":
+ zero := ast.NewZeroOrOneExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "*":
+ zero := ast.NewZeroOrMoreExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "+":
+ one := ast.NewOneOrMoreExpr(pos)
+ one.Expr = expr.(ast.Expression)
+ return one, nil
+ default:
+ return nil, errors.New("unknown operator: " + opStr)
+ }
+} / PrimaryExpr
+
+SuffixedOp ← ( '?' / '*' / '+' ) {
+ return string(c.text), nil
+}
+
+PrimaryExpr ← LitMatcher / CharClassMatcher / AnyMatcher / RuleRefExpr / SemanticPredExpr / "(" __ expr:Expression __ ")" {
+ return expr, nil
+}
+RuleRefExpr ← name:IdentifierName !( __ ( StringLiteral __ )? RuleDefOp ) {
+ ref := ast.NewRuleRefExpr(c.astPos())
+ ref.Name = name.(*ast.Identifier)
+ return ref, nil
+}
+SemanticPredExpr ← op:SemanticPredOp __ code:CodeBlock {
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndCodeExpr(c.astPos())
+ and.Code = code.(*ast.CodeBlock)
+ return and, nil
+ }
+ not := ast.NewNotCodeExpr(c.astPos())
+ not.Code = code.(*ast.CodeBlock)
+ return not, nil
+}
+SemanticPredOp ← ( '&' / '!' ) {
+ return string(c.text), nil
+}
+
+RuleDefOp ← '=' / "<-" / '\u2190' / '\u27f5'
+
+SourceChar ← .
+Comment ← MultiLineComment / SingleLineComment
+MultiLineComment ← "/*" ( !"*/" SourceChar )* "*/"
+MultiLineCommentNoLineTerminator ← "/*" ( !( "*/" / EOL ) SourceChar )* "*/"
+SingleLineComment ← "//" ( !EOL SourceChar )*
+
+Identifier ← IdentifierName
+IdentifierName ← IdentifierStart IdentifierPart* {
+ return ast.NewIdentifier(c.astPos(), string(c.text)), nil
+}
+IdentifierStart ← [a-z_]i
+IdentifierPart ← IdentifierStart / [0-9]
+
+LitMatcher ← lit:StringLiteral ignore:"i"? {
+ rawStr := lit.(*ast.StringLit).Val
+ s, err := strconv.Unquote(rawStr)
+ if err != nil {
+ return nil, err
+ }
+ m := ast.NewLitMatcher(c.astPos(), s)
+ m.IgnoreCase = ignore != nil
+ return m, nil
+}
+// StringLiteral matches a double-quoted string, a single-quoted (char)
+// literal, or a raw backtick string, and captures the raw matched text.
+// NOTE(review): unlike pigeon.peg, the single-quoted form matches exactly
+// one SingleStringChar and the raw form exactly one RawStringChar (no "*"
+// repetition) — presumably a deliberate restriction of the bootstrap
+// subset, but worth confirming against the full grammar.
+StringLiteral ← ( '"' DoubleStringChar* '"' / "'" SingleStringChar "'" / '`' RawStringChar '`' ) {
+	return ast.NewStringLit(c.astPos(), string(c.text)), nil
+}
+DoubleStringChar ← !( '"' / "\\" / EOL ) SourceChar / "\\" DoubleStringEscape
+SingleStringChar ← !( "'" / "\\" / EOL ) SourceChar / "\\" SingleStringEscape
+RawStringChar ← !'`' SourceChar
+
+DoubleStringEscape ← "'" / CommonEscapeSequence
+SingleStringEscape ← '"' / CommonEscapeSequence
+
+CommonEscapeSequence ← SingleCharEscape / OctalEscape / HexEscape / LongUnicodeEscape / ShortUnicodeEscape
+SingleCharEscape ← 'a' / 'b' / 'n' / 'f' / 'r' / 't' / 'v' / '\\'
+OctalEscape ← OctalDigit OctalDigit OctalDigit
+HexEscape ← 'x' HexDigit HexDigit
+LongUnicodeEscape ← 'U' HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit
+ShortUnicodeEscape ← 'u' HexDigit HexDigit HexDigit HexDigit
+
+OctalDigit ← [0-7]
+DecimalDigit ← [0-9]
+HexDigit ← [0-9a-f]i
+
+CharClassMatcher ← '[' ( ClassCharRange / ClassChar / "\\" UnicodeClassEscape )* ']' 'i'? {
+ pos := c.astPos()
+ cc := ast.NewCharClassMatcher(pos, string(c.text))
+ return cc, nil
+}
+ClassCharRange ← ClassChar '-' ClassChar
+ClassChar ← !( "]" / "\\" / EOL ) SourceChar / "\\" CharClassEscape
+CharClassEscape ← ']' / CommonEscapeSequence
+
+UnicodeClassEscape ← 'p' ( SingleCharUnicodeClass / '{' UnicodeClass '}' )
+SingleCharUnicodeClass ← [LMNCPZS]
+UnicodeClass ← [a-z_]i+
+
+AnyMatcher ← "." {
+ any := ast.NewAnyMatcher(c.astPos(), ".")
+ return any, nil
+}
+
+CodeBlock ← "{" Code "}" {
+ pos := c.astPos()
+ cb := ast.NewCodeBlock(pos, string(c.text))
+ return cb, nil
+}
+
+Code ← ( ( ![{}] SourceChar )+ / "{" Code "}" )*
+
+__ ← ( Whitespace / EOL / Comment )*
+_ ← ( Whitespace / MultiLineCommentNoLineTerminator )*
+
+Whitespace ← [ \t\r]
+EOL ← '\n'
+EOS ← __ ';' / _ SingleLineComment? EOL / __ EOF
+
+EOF ← !.
+
diff --git a/vendor/github.com/PuerkitoBio/pigeon/grammar/pigeon.peg b/vendor/github.com/PuerkitoBio/pigeon/grammar/pigeon.peg
new file mode 100644
index 0000000000..5d90993b8a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/grammar/pigeon.peg
@@ -0,0 +1,294 @@
+{
+package main
+}
+
+Grammar ← __ initializer:( Initializer __ )? rules:( Rule __ )+ EOF {
+ pos := c.astPos()
+
+ // create the grammar, assign its initializer
+ g := ast.NewGrammar(pos)
+ initSlice := toIfaceSlice(initializer)
+ if len(initSlice) > 0 {
+ g.Init = initSlice[0].(*ast.CodeBlock)
+ }
+
+ rulesSlice := toIfaceSlice(rules)
+ g.Rules = make([]*ast.Rule, len(rulesSlice))
+ for i, duo := range rulesSlice {
+ g.Rules[i] = duo.([]interface{})[0].(*ast.Rule)
+ }
+
+ return g, nil
+}
+
+Initializer ← code:CodeBlock EOS {
+ return code, nil
+}
+
+Rule ← name:IdentifierName __ display:( StringLiteral __ )? RuleDefOp __ expr:Expression EOS {
+ pos := c.astPos()
+
+ rule := ast.NewRule(pos, name.(*ast.Identifier))
+ displaySlice := toIfaceSlice(display)
+ if len(displaySlice) > 0 {
+ rule.DisplayName = displaySlice[0].(*ast.StringLit)
+ }
+ rule.Expr = expr.(ast.Expression)
+
+ return rule, nil
+}
+
+Expression ← ChoiceExpr
+
+ChoiceExpr ← first:ActionExpr rest:( __ "/" __ ActionExpr )* {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+
+ pos := c.astPos()
+ choice := ast.NewChoiceExpr(pos)
+ choice.Alternatives = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ choice.Alternatives = append(choice.Alternatives, sl.([]interface{})[3].(ast.Expression))
+ }
+ return choice, nil
+}
+
+ActionExpr ← expr:SeqExpr code:( __ CodeBlock )? {
+ if code == nil {
+ return expr, nil
+ }
+
+ pos := c.astPos()
+ act := ast.NewActionExpr(pos)
+ act.Expr = expr.(ast.Expression)
+ codeSlice := toIfaceSlice(code)
+ act.Code = codeSlice[1].(*ast.CodeBlock)
+
+ return act, nil
+}
+
+SeqExpr ← first:LabeledExpr rest:( __ LabeledExpr )* {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+ seq := ast.NewSeqExpr(c.astPos())
+ seq.Exprs = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ seq.Exprs = append(seq.Exprs, sl.([]interface{})[1].(ast.Expression))
+ }
+ return seq, nil
+}
+
+LabeledExpr ← label:Identifier __ ':' __ expr:PrefixedExpr {
+ pos := c.astPos()
+ lab := ast.NewLabeledExpr(pos)
+ lab.Label = label.(*ast.Identifier)
+ lab.Expr = expr.(ast.Expression)
+ return lab, nil
+} / PrefixedExpr
+
+PrefixedExpr ← op:PrefixedOp __ expr:SuffixedExpr {
+ pos := c.astPos()
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndExpr(pos)
+ and.Expr = expr.(ast.Expression)
+ return and, nil
+ }
+ not := ast.NewNotExpr(pos)
+ not.Expr = expr.(ast.Expression)
+ return not, nil
+} / SuffixedExpr
+
+PrefixedOp ← ( '&' / '!' ) {
+ return string(c.text), nil
+}
+
+// SuffixedExpr wraps a primary expression with an optional repetition
+// operator: "?" (zero or one), "*" (zero or more) or "+" (one or more).
+// A primary expression with no suffix falls through to the second
+// alternative and is returned unchanged.
+SuffixedExpr ← expr:PrimaryExpr __ op:SuffixedOp {
+	pos := c.astPos()
+	opStr := op.(string)
+	switch opStr {
+	case "?":
+		zero := ast.NewZeroOrOneExpr(pos)
+		zero.Expr = expr.(ast.Expression)
+		return zero, nil
+	case "*":
+		zero := ast.NewZeroOrMoreExpr(pos)
+		zero.Expr = expr.(ast.Expression)
+		return zero, nil
+	case "+":
+		one := ast.NewOneOrMoreExpr(pos)
+		one.Expr = expr.(ast.Expression)
+		return one, nil
+	default:
+		// SuffixedOp only matches ? * + so this should be unreachable;
+		// kept as a defensive error rather than a panic.
+		return nil, errors.New("unknown operator: " + opStr)
+	}
+} / PrimaryExpr
+
+SuffixedOp ← ( '?' / '*' / '+' ) {
+ return string(c.text), nil
+}
+
+PrimaryExpr ← LitMatcher / CharClassMatcher / AnyMatcher / RuleRefExpr / SemanticPredExpr / "(" __ expr:Expression __ ")" {
+ return expr, nil
+}
+RuleRefExpr ← name:IdentifierName !( __ ( StringLiteral __ )? RuleDefOp ) {
+ ref := ast.NewRuleRefExpr(c.astPos())
+ ref.Name = name.(*ast.Identifier)
+ return ref, nil
+}
+SemanticPredExpr ← op:SemanticPredOp __ code:CodeBlock {
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndCodeExpr(c.astPos())
+ and.Code = code.(*ast.CodeBlock)
+ return and, nil
+ }
+ not := ast.NewNotCodeExpr(c.astPos())
+ not.Code = code.(*ast.CodeBlock)
+ return not, nil
+}
+SemanticPredOp ← ( '&' / '!' ) {
+ return string(c.text), nil
+}
+
+RuleDefOp ← '=' / "<-" / '\u2190' / '\u27f5'
+
+SourceChar ← .
+Comment ← MultiLineComment / SingleLineComment
+MultiLineComment ← "/*" ( !"*/" SourceChar )* "*/"
+MultiLineCommentNoLineTerminator ← "/*" ( !( "*/" / EOL ) SourceChar )* "*/"
+SingleLineComment ← "//" ( !EOL SourceChar )*
+
+Identifier ← ident:IdentifierName {
+ astIdent := ast.NewIdentifier(c.astPos(), string(c.text))
+ if reservedWords[astIdent.Val] {
+ return astIdent, errors.New("identifier is a reserved word")
+ }
+ return astIdent, nil
+}
+
+IdentifierName ← IdentifierStart IdentifierPart* {
+ return ast.NewIdentifier(c.astPos(), string(c.text)), nil
+}
+IdentifierStart ← [\pL_]
+IdentifierPart ← IdentifierStart / [\p{Nd}]
+
+LitMatcher ← lit:StringLiteral ignore:"i"? {
+ rawStr := lit.(*ast.StringLit).Val
+ s, err := strconv.Unquote(rawStr)
+ if err != nil {
+ // an invalid string literal raises an error in the escape rules,
+ // so simply replace the literal with an empty string here to
+ // avoid a cascade of errors.
+ s = ""
+ }
+ m := ast.NewLitMatcher(c.astPos(), s)
+ m.IgnoreCase = ignore != nil
+ return m, nil
+}
+// StringLiteral matches a Go-style string literal: double-quoted,
+// single-quoted (char literal) or raw backtick string. The second
+// alternative is error recovery for an unterminated literal: it consumes
+// up to EOL/EOF, reports the error, and returns a harmless placeholder
+// ("``") so parsing can continue and surface further errors.
+StringLiteral ← ( '"' DoubleStringChar* '"' / "'" SingleStringChar "'" / '`' RawStringChar* '`' ) {
+	return ast.NewStringLit(c.astPos(), string(c.text)), nil
+} / ( ( '"' DoubleStringChar* ( EOL / EOF ) ) / ( "'" SingleStringChar? ( EOL / EOF ) ) / '`' RawStringChar* EOF ) {
+	return ast.NewStringLit(c.astPos(), "``"), errors.New("string literal not terminated")
+}
+
+DoubleStringChar ← !( '"' / "\\" / EOL ) SourceChar / "\\" DoubleStringEscape
+SingleStringChar ← !( "'" / "\\" / EOL ) SourceChar / "\\" SingleStringEscape
+RawStringChar ← !'`' SourceChar
+
+DoubleStringEscape ← ( '"' / CommonEscapeSequence )
+ / ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid escape character")
+}
+SingleStringEscape ← ( "'" / CommonEscapeSequence )
+ / ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid escape character")
+}
+
+CommonEscapeSequence ← SingleCharEscape / OctalEscape / HexEscape / LongUnicodeEscape / ShortUnicodeEscape
+SingleCharEscape ← 'a' / 'b' / 'n' / 'f' / 'r' / 't' / 'v' / '\\'
+OctalEscape ← OctalDigit OctalDigit OctalDigit
+ / OctalDigit ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid octal escape")
+}
+HexEscape ← 'x' HexDigit HexDigit
+ / 'x' ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid hexadecimal escape")
+}
+LongUnicodeEscape ←
+ 'U' HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit HexDigit {
+ return validateUnicodeEscape(string(c.text), "invalid Unicode escape")
+ }
+ / 'U' ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid Unicode escape")
+}
+ShortUnicodeEscape ←
+ 'u' HexDigit HexDigit HexDigit HexDigit {
+ return validateUnicodeEscape(string(c.text), "invalid Unicode escape")
+ }
+ / 'u' ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid Unicode escape")
+}
+
+OctalDigit ← [0-7]
+DecimalDigit ← [0-9]
+HexDigit ← [0-9a-f]i
+
+CharClassMatcher ← '[' ( ClassCharRange / ClassChar / "\\" UnicodeClassEscape )* ']' 'i'? {
+ pos := c.astPos()
+ cc := ast.NewCharClassMatcher(pos, string(c.text))
+ return cc, nil
+} / '[' ( !( EOL ) SourceChar )* ( EOL / EOF ) {
+ return ast.NewCharClassMatcher(c.astPos(), "[]"), errors.New("character class not terminated")
+}
+
+ClassCharRange ← ClassChar '-' ClassChar
+ClassChar ← !( "]" / "\\" / EOL ) SourceChar / "\\" CharClassEscape
+CharClassEscape ← ( ']' / CommonEscapeSequence )
+ / !'p' ( SourceChar / EOL / EOF ) {
+ return nil, errors.New("invalid escape character")
+}
+
+UnicodeClassEscape ← 'p' (
+ SingleCharUnicodeClass
+ / !'{' ( SourceChar / EOL / EOF ) { return nil, errors.New("invalid Unicode class escape") }
+ / '{' ident:IdentifierName '}' {
+ if !unicodeClasses[ident.(*ast.Identifier).Val] {
+ return nil, errors.New("invalid Unicode class escape")
+ }
+ return nil, nil
+ }
+ / '{' IdentifierName ( ']' / EOL / EOF ) {
+ return nil, errors.New("Unicode class not terminated")
+ }
+ )
+SingleCharUnicodeClass ← [LMNCPZS]
+
+AnyMatcher ← "." {
+ any := ast.NewAnyMatcher(c.astPos(), ".")
+ return any, nil
+}
+
+CodeBlock ← '{' Code '}' {
+ pos := c.astPos()
+ cb := ast.NewCodeBlock(pos, string(c.text))
+ return cb, nil
+} / '{' Code EOF {
+ return nil, errors.New("code block not terminated")
+}
+
+Code ← ( ( ![{}] SourceChar )+ / '{' Code '}' )*
+
+__ ← ( Whitespace / EOL / Comment )*
+_ ← ( Whitespace / MultiLineCommentNoLineTerminator )*
+
+Whitespace ← [ \t\r]
+EOL ← '\n'
+EOS ← __ ';' / _ SingleLineComment? EOL / __ EOF
+
+EOF ← !.
+
diff --git a/vendor/github.com/PuerkitoBio/pigeon/main.go b/vendor/github.com/PuerkitoBio/pigeon/main.go
new file mode 100644
index 0000000000..755592c23c
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/main.go
@@ -0,0 +1,195 @@
+package main
+
+import (
+ "bufio"
+ "errors"
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+ "github.com/PuerkitoBio/pigeon/builder"
+)
+
+// exit is an indirection over os.Exit so tests can intercept process
+// termination (main_test.go swaps it for a function that panics with
+// the exit code).
+var exit = os.Exit
+
+// main is the pigeon entry point: it parses command-line flags, reads
+// the PEG grammar from stdin or from the single file argument, parses
+// it, and — unless -x is set — generates the parser via the builder
+// package.
+//
+// Exit codes: 0 help shown, 1 bad arguments, 2 cannot open input,
+// 3 parse error, 4 cannot create output, 5 build error.
+func main() {
+ fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ // define command-line flags
+ var (
+ cacheFlag = fs.Bool("cache", false, "cache parsing results")
+ dbgFlag = fs.Bool("debug", false, "set debug mode")
+ shortHelpFlag = fs.Bool("h", false, "show help page")
+ longHelpFlag = fs.Bool("help", false, "show help page")
+ noRecoverFlag = fs.Bool("no-recover", false, "do not recover from panic")
+ outputFlag = fs.String("o", "", "output file, defaults to stdout")
+ recvrNmFlag = fs.String("receiver-name", "c", "receiver name for the generated methods")
+ noBuildFlag = fs.Bool("x", false, "do not build, only parse")
+ )
+
+ fs.Usage = usage
+ fs.Parse(os.Args[1:])
+
+ if *shortHelpFlag || *longHelpFlag {
+ fs.Usage()
+ exit(0)
+ }
+
+ if fs.NArg() > 1 {
+ argError(1, "expected one argument, got %q", strings.Join(fs.Args(), " "))
+ }
+
+ // get input source; empty infile means "read from stdin"
+ infile := ""
+ if fs.NArg() == 1 {
+ infile = fs.Arg(0)
+ }
+ nm, rc := input(infile)
+ defer rc.Close()
+
+ // parse input; Recover is on by default so grammar-action panics
+ // surface as errors unless -no-recover is given
+ g, err := ParseReader(nm, rc, Debug(*dbgFlag), Memoize(*cacheFlag), Recover(!*noRecoverFlag))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "parse error(s):\n", err)
+ exit(3)
+ }
+
+ if !*noBuildFlag {
+ // generate parser
+ out := output(*outputFlag)
+ defer out.Close()
+
+ curNmOpt := builder.ReceiverName(*recvrNmFlag)
+ if err := builder.BuildParser(out, g.(*ast.Grammar), curNmOpt); err != nil {
+ fmt.Fprintln(os.Stderr, "build error: ", err)
+ exit(5)
+ }
+ }
+}
+
+// usagePage is the template for the command help text; usage renders
+// it with os.Args[0] substituted for the %s verb.
+var usagePage = `usage: %s [options] [GRAMMAR_FILE]
+
+Pigeon generates a parser based on a PEG grammar. It doesn't try
+to format the generated code nor to detect required imports -
+it is recommended to pipe the output of pigeon through a tool
+such as goimports to do this, e.g.:
+
+	pigeon GRAMMAR_FILE | goimports > output.go
+
+Use the following command to install goimports:
+
+	go get golang.org/x/tools/cmd/goimports
+
+By default, pigeon reads the grammar from stdin and writes the
+generated parser to stdout. If GRAMMAR_FILE is specified, the
+grammar is read from this file instead. If the -o flag is set,
+the generated code is written to this file instead.
+
+	-cache
+		cache parser results to avoid exponential parsing time in
+		pathological cases. Can make the parsing slower for typical
+		cases and uses more memory.
+	-debug
+		output debugging information while parsing the grammar.
+	-h -help
+		display this help message.
+	-no-recover
+		do not recover from a panic. Useful to access the panic stack
+		when debugging, otherwise the panic is converted to an error.
+	-o OUTPUT_FILE
+		write the generated parser to OUTPUT_FILE. Defaults to stdout.
+	-receiver-name NAME
+		use NAME as for the receiver name of the generated methods
+		for the grammar's code blocks. Defaults to "c".
+	-x
+		do not generate the parser, only parse the grammar.
+
+See https://godoc.org/github.com/PuerkitoBio/pigeon for more
+information.
+`
+
+// usage writes the command help page to standard output, substituting
+// the program name into the usagePage template.
+func usage() {
+ fmt.Fprintf(os.Stdout, usagePage, os.Args[0])
+}
+
+// argError reports a command-line usage error: it writes the formatted
+// message (newline-terminated) to stderr, shows the help page, then
+// terminates via the exit seam with the given code.
+func argError(exitCode int, msg string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, msg+"\n", args...)
+ usage()
+ exit(exitCode)
+}
+
+// input resolves the grammar source: the named file when filename is
+// non-empty (printing the error and exiting with code 2 if it cannot
+// be opened), standard input otherwise. It returns a display name for
+// error messages and a buffered ReadCloser whose Close releases the
+// underlying file.
+func input(filename string) (nm string, rc io.ReadCloser) {
+ nm, inf := "stdin", os.Stdin
+ if filename != "" {
+ f, err := os.Open(filename)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ exit(2)
+ }
+ nm, inf = filename, f
+ }
+ return nm, makeReadCloser(bufio.NewReader(inf), inf)
+}
+
+// output resolves the destination for the generated parser: the named
+// file when filename is non-empty (printing the error and exiting with
+// code 4 if it cannot be created), standard output otherwise.
+func output(filename string) io.WriteCloser {
+ if filename == "" {
+ return os.Stdout
+ }
+ f, err := os.Create(filename)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ exit(4)
+ }
+ return f
+}
+
+// makeReadCloser creates a ReadCloser that reads from r and closes c.
+// It lets input return a buffered reader while Close still releases
+// the underlying file.
+func makeReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
+ rc := struct {
+ io.Reader
+ io.Closer
+ }{r, c}
+ return io.ReadCloser(rc)
+}
+
+// astPos is a helper method for the PEG grammar parser. It returns the
+// position of the current match as an ast.Pos, copying the line,
+// column and byte offset from the parser's current position.
+func (c *current) astPos() ast.Pos {
+ return ast.Pos{Line: c.pos.line, Col: c.pos.col, Off: c.pos.offset}
+}
+
+// toIfaceSlice is a helper function for the PEG grammar parser. It converts
+// v to a slice of empty interfaces. A nil v yields a nil slice; any
+// non-nil v must already be a []interface{} or the type assertion
+// panics.
+func toIfaceSlice(v interface{}) []interface{} {
+ if v == nil {
+ return nil
+ }
+ return v.([]interface{})
+}
+
+// validateUnicodeEscape checks that the provided escape sequence is a
+// valid Unicode escape sequence. escape is the sequence without its
+// leading backslash (e.g. "u00e9"); errMsg is the error text reported
+// on failure. On success it returns (nil, nil) so it can be used
+// directly as a grammar action result.
+func validateUnicodeEscape(escape, errMsg string) (interface{}, error) {
+ // UnquoteChar performs the syntactic validation; the backslash is
+ // re-added and '"' selects double-quoted-string escape rules.
+ r, _, _, err := strconv.UnquoteChar("\\"+escape, '"')
+ if err != nil {
+ return nil, errors.New(errMsg)
+ }
+ // Reject UTF-16 surrogate halves, which are not valid code points.
+ if 0xD800 <= r && r <= 0xDFFF {
+ return nil, errors.New(errMsg)
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/main_test.go b/vendor/github.com/PuerkitoBio/pigeon/main_test.go
new file mode 100644
index 0000000000..8dbfa6ac48
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/main_test.go
@@ -0,0 +1,54 @@
+package main
+
+import (
+ "os"
+ "strings"
+ "testing"
+)
+
+// TestMain drives main() end-to-end for several flag combinations and
+// checks the resulting exit code, with the exit seam stubbed to panic
+// so the test process survives.
+func TestMain(t *testing.T) {
+ stdout, stderr := os.Stdout, os.Stderr
+ // NOTE(review): os.Open opens DevNull read-only, so writes to the
+ // replaced stdout/stderr fail silently; output is discarded either
+ // way, but os.OpenFile with O_WRONLY would be the precise call —
+ // TODO confirm upstream intent.
+ os.Stdout, _ = os.Open(os.DevNull)
+ os.Stderr, _ = os.Open(os.DevNull)
+ defer func() {
+ exit = os.Exit
+ os.Stdout = stdout
+ os.Stderr = stderr
+ }()
+ exit = func(code int) {
+ panic(code)
+ }
+
+ cases := []struct {
+ args string
+ code int
+ }{
+ {args: "", code: 3}, // stdin: no match found
+ {args: "-h", code: 0}, // help
+ {args: "FILE1 FILE2", code: 1}, // want only 1 non-flag arg
+ {args: "-x", code: 3}, // stdin: no match found
+ }
+
+ for _, tc := range cases {
+ os.Args = append([]string{"pigeon"}, strings.Fields(tc.args)...)
+
+ got := runMainRecover()
+ if got != tc.code {
+ t.Errorf("%q: want code %d, got %d", tc.args, tc.code, got)
+ }
+ }
+}
+
+// runMainRecover invokes main and converts a panic carrying an int —
+// raised by TestMain's exit stub — back into that exit code. Any other
+// panic value is re-raised. It returns 0 when main completes normally.
+func runMainRecover() (code int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if i, ok := e.(int); ok {
+ code = i
+ return
+ }
+ panic(e)
+ }
+ }()
+ main()
+ return 0
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/misc/cmd/unicode-classes/main.go b/vendor/github.com/PuerkitoBio/pigeon/misc/cmd/unicode-classes/main.go
new file mode 100644
index 0000000000..f21e6d2a5d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/misc/cmd/unicode-classes/main.go
@@ -0,0 +1,53 @@
+// Command unicode-classes generates a set-like map of all valid
+// Unicode classes.
+package main
+
+import (
+ "fmt"
+ "sort"
+ "unicode"
+)
+
+// main prints a generated Go source file declaring unicodeClasses, a
+// set-like map of every name usable in \p{...} escapes: the union of
+// unicode.Categories, unicode.Properties and unicode.Scripts, sorted
+// alphabetically so the output is deterministic.
+func main() {
+ set := make(map[string]bool)
+ for k := range unicode.Categories {
+ set[k] = true
+ }
+ for k := range unicode.Properties {
+ set[k] = true
+ }
+ for k := range unicode.Scripts {
+ set[k] = true
+ }
+ classes := make([]string, 0, len(set))
+ for k := range set {
+ classes = append(classes, k)
+ }
+
+ sort.Strings(classes)
+ fmt.Println(`// This file is generated by the misc/cmd/unicode-classes tool.
+// Do not edit.
+`)
+ fmt.Println("package main")
+ fmt.Println("\nvar unicodeClasses = map[string]bool{")
+ for _, s := range classes {
+ fmt.Printf("\t%q: true,\n", s)
+ }
+ fmt.Println("}")
+}
+
+// lenSorter was used to generate Unicode classes directly in the PEG
+// grammar (where longer classes had to come first).
+//
+// Ordering: longer strings sort before shorter ones; equal-length
+// strings are ordered reverse-lexicographically.
+type lenSorter []string
+
+func (l lenSorter) Len() int { return len(l) }
+func (l lenSorter) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l lenSorter) Less(i, j int) bool {
+ li, lj := len(l[i]), len(l[j])
+ if lj < li {
+ return true
+ } else if li < lj {
+ return false
+ }
+ // equal lengths: l[j] < l[i] means l[i] sorts first (reverse lex)
+ return l[j] < l[i]
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/misc/git/pre-commit b/vendor/github.com/PuerkitoBio/pigeon/misc/git/pre-commit
new file mode 100755
index 0000000000..b2466410cd
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/misc/git/pre-commit
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Copyright 2012 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# git gofmt pre-commit hook
+#
+# To use, store as .git/hooks/pre-commit inside your repository and make sure
+# it has execute permissions.
+#
+# This script does not handle file names that contain spaces.
+
+# golint is purely informational, it doesn't fail with exit code != 0 if it finds something,
+# because it may find a lot of false positives. Just print out its result for information.
+echo "lint result (informational only):"
+golint ./...
+
+# go vet returns 1 if an error was found. Exit the hook with this exit code.
+go vet ./...
+vetres=$?
+
+# Check for gofmt problems and report if any.
+gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$')
+# The "EXIT n" echo mirrors the exit status so it is visible in hook output.
+[ -z "$gofiles" ] && echo "EXIT $vetres" && exit $vetres
+
+unformatted=$(gofmt -l $gofiles)
+[ -z "$unformatted" ] && echo "EXIT $vetres" && exit $vetres
+
+# Some files are not gofmt'd. Print message and fail.
+
+echo >&2 "Go files must be formatted with gofmt. Please run:"
+for fn in $unformatted; do
+	echo >&2 "  gofmt -w $PWD/$fn"
+done
+
+echo "EXIT 1"
+exit 1
diff --git a/vendor/github.com/PuerkitoBio/pigeon/parse_test.go b/vendor/github.com/PuerkitoBio/pigeon/parse_test.go
new file mode 100644
index 0000000000..0eee8b7468
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/parse_test.go
@@ -0,0 +1,439 @@
+package main
+
+import (
+ "testing"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+// invalidParseCases maps grammar source text to the exact error string
+// the parser must produce for it; multi-line values list every error
+// reported for the input, in order.
+var invalidParseCases = map[string]string{
+ "": "file:1:1 (0): no match found",
+ "a": "file:1:1 (0): no match found",
+ "abc": "file:1:1 (0): no match found",
+ " ": "file:1:1 (0): no match found",
+ `a = +`: "file:1:1 (0): no match found",
+ `a = *`: "file:1:1 (0): no match found",
+ `a = ?`: "file:1:1 (0): no match found",
+ "a ←": "file:1:1 (0): no match found",
+ "a ← b\nb ←": "file:1:1 (0): no match found",
+ "a ← nil:b": "file:1:5 (6): rule Identifier: identifier is a reserved word",
+ "\xfe": "file:1:1 (0): invalid encoding",
+ "{}{}": "file:1:1 (0): no match found",
+
+ // non-terminated, empty, EOF "quoted" tokens
+ "{": "file:1:1 (0): rule CodeBlock: code block not terminated",
+ "\n{": "file:2:1 (1): rule CodeBlock: code block not terminated",
+ `a = "`: "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = `": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = '": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ `a = [`: "file:1:5 (4): rule CharClassMatcher: character class not terminated",
+ `a = [\p{]`: `file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+
+ // non-terminated, empty, EOL "quoted" tokens
+ "{\n": "file:1:1 (0): rule CodeBlock: code block not terminated",
+ "\n{\n": "file:2:1 (1): rule CodeBlock: code block not terminated",
+ "a = \"\n": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = `\n": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = '\n": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = [\n": "file:1:5 (4): rule CharClassMatcher: character class not terminated",
+ "a = [\\p{\n]": `file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+
+ // non-terminated quoted tokens with escaped closing char
+ `a = "\"`: "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ `a = '\'`: "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ `a = [\]`: "file:1:5 (4): rule CharClassMatcher: character class not terminated",
+
+ // non-terminated, non-empty, EOF "quoted" tokens
+ "{a": "file:1:1 (0): rule CodeBlock: code block not terminated",
+ "\n{{}": "file:2:1 (1): rule CodeBlock: code block not terminated",
+ `a = "b`: "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = `b": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ "a = 'b": "file:1:5 (4): rule StringLiteral: string literal not terminated",
+ `a = [b`: "file:1:5 (4): rule CharClassMatcher: character class not terminated",
+ `a = [\p{W]`: `file:1:8 (7): rule UnicodeClassEscape: Unicode class not terminated
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+
+ // invalid escapes
+ `a ← [\pA]`: "file:1:8 (9): rule UnicodeClassEscape: invalid Unicode class escape",
+ `a ← [\p{WW}]`: "file:1:8 (9): rule UnicodeClassEscape: invalid Unicode class escape",
+ `a = '\"'`: "file:1:7 (6): rule SingleStringEscape: invalid escape character",
+ `a = "\'"`: "file:1:7 (6): rule DoubleStringEscape: invalid escape character",
+ `a = [\']`: "file:1:7 (6): rule CharClassEscape: invalid escape character",
+ `a = '\xz'`: "file:1:7 (6): rule HexEscape: invalid hexadecimal escape",
+ `a = '\0z'`: "file:1:7 (6): rule OctalEscape: invalid octal escape",
+ `a = '\uz'`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = '\Uz'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+
+ // escapes followed by newline
+ "a = '\\\n": `file:2:0 (6): rule SingleStringEscape: invalid escape character
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\x\n": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\0\n": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\u\n": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\U\n": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\\n": `file:2:0 (6): rule DoubleStringEscape: invalid escape character
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\x\n": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\0\n": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\u\n": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\U\n": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = [\\\n": `file:2:0 (6): rule CharClassEscape: invalid escape character
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\x\n": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\0\n": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\u\n": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\U\n": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\p\n": `file:2:0 (7): rule UnicodeClassEscape: invalid Unicode class escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\p{\n": `file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+
+ // escapes followed by EOF
+ "a = '\\": `file:1:7 (6): rule SingleStringEscape: invalid escape character
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\x": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\0": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\u": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = '\\U": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\": `file:1:7 (6): rule DoubleStringEscape: invalid escape character
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\x": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\0": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\u": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = \"\\U": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule StringLiteral: string literal not terminated`,
+ "a = [\\": `file:1:7 (6): rule CharClassEscape: invalid escape character
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\x": `file:1:7 (6): rule HexEscape: invalid hexadecimal escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\0": `file:1:7 (6): rule OctalEscape: invalid octal escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\u": `file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\U": `file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\p": `file:1:8 (7): rule UnicodeClassEscape: invalid Unicode class escape
+file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+ "a = [\\p{": `file:1:5 (4): rule CharClassMatcher: character class not terminated`,
+
+ // multi-char escapes, fail after 2 chars
+ `a = '\x0z'`: "file:1:7 (6): rule HexEscape: invalid hexadecimal escape",
+ `a = '\00z'`: "file:1:7 (6): rule OctalEscape: invalid octal escape",
+ `a = '\u0z'`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = '\U0z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ // multi-char escapes, fail after 3 chars
+ `a = '\u00z'`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = '\U00z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ // multi-char escapes, fail after 4 chars
+ `a = '\u000z'`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = '\U000z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ // multi-char escapes, fail after 5 chars
+ `a = '\U0000z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ // multi-char escapes, fail after 6 chars
+ `a = '\U00000z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ // multi-char escapes, fail after 7 chars
+ `a = '\U000000z'`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+
+ // combine escape errors
+ `a = "\a\b\c\t\n\r\xab\xz\ux"`: `file:1:11 (10): rule DoubleStringEscape: invalid escape character
+file:1:23 (22): rule HexEscape: invalid hexadecimal escape
+file:1:26 (25): rule ShortUnicodeEscape: invalid Unicode escape`,
+
+ // syntactically valid escapes, but invalid values
+ `a = "\udfff"`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = "\ud800"`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = "\ud801"`: "file:1:7 (6): rule ShortUnicodeEscape: invalid Unicode escape",
+ `a = "\U00110000"`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ `a = "\U0000DFFF"`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ `a = "\U0000D800"`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+ `a = "\U0000D801"`: "file:1:7 (6): rule LongUnicodeEscape: invalid Unicode escape",
+}
+
+// TestInvalidParseCases feeds every invalid grammar fixture to the
+// parser — once without and once with memoization — and checks that
+// the exact expected error text is produced.
+func TestInvalidParseCases(t *testing.T) {
+ for _, memo := range []bool{false, true} {
+ for tc, exp := range invalidParseCases {
+ _, err := Parse("file", []byte(tc), Memoize(memo))
+ if err == nil {
+ t.Errorf("%q: want error, got none", tc)
+ continue
+ }
+ if err.Error() != exp {
+ t.Errorf("%q: want \n%s\n, got \n%s\n", tc, exp, err)
+ }
+ }
+ }
+}
+
+// validParseCases maps grammar source text to the AST it must parse
+// to. Positions are written as the zero ast.Pos{}; presumably the
+// compareGrammars helper ignores position fields — confirm in its
+// definition.
+var validParseCases = map[string]*ast.Grammar{
+ "a = b": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ },
+ },
+ "a ← b\nc=d \n e <- f \ng\u27f5h": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "c"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "d")},
+ },
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "e"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "f")},
+ },
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "g"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "h")},
+ },
+ },
+ },
+ `a "A"← b`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ DisplayName: ast.NewStringLit(ast.Pos{}, `"A"`),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ },
+ },
+ "{ init \n}\na 'A'← b": &ast.Grammar{
+ Init: ast.NewCodeBlock(ast.Pos{}, "{ init \n}"),
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ DisplayName: ast.NewStringLit(ast.Pos{}, `'A'`),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ },
+ },
+ "a\n<-\nb": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ },
+ },
+ "a\n<-\nb\nc": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.SeqExpr{
+ Exprs: []ast.Expression{
+ &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "c")},
+ },
+ },
+ },
+ },
+ },
+ "a\n<-\nb\nc\n=\nd": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "c"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "d")},
+ },
+ },
+ },
+ "a\n<-\nb\nc\n'C'\n=\nd": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "b")},
+ },
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "c"),
+ DisplayName: ast.NewStringLit(ast.Pos{}, `'C'`),
+ Expr: &ast.RuleRefExpr{Name: ast.NewIdentifier(ast.Pos{}, "d")},
+ },
+ },
+ },
+ `a = [a-def]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'e', 'f'},
+ Ranges: []rune{'a', 'd'},
+ },
+ },
+ },
+ },
+ `a = [abc-f]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'a', 'b'},
+ Ranges: []rune{'c', 'f'},
+ },
+ },
+ },
+ },
+ `a = [abc-fg]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'a', 'b', 'g'},
+ Ranges: []rune{'c', 'f'},
+ },
+ },
+ },
+ },
+ `a = [abc-fgh-l]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'a', 'b', 'g'},
+ Ranges: []rune{'c', 'f', 'h', 'l'},
+ },
+ },
+ },
+ },
+ `a = [\x00-\xabc]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'c'},
+ Ranges: []rune{'\x00', '\xab'},
+ },
+ },
+ },
+ },
+ `a = [-a-b]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'-'},
+ Ranges: []rune{'a', 'b'},
+ },
+ },
+ },
+ },
+ `a = [a-b-d]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'-', 'd'},
+ Ranges: []rune{'a', 'b'},
+ },
+ },
+ },
+ },
+ `a = [\u0012\123]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'\u0012', '\123'},
+ },
+ },
+ },
+ },
+ `a = [-\u0012-\U00001234]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ Chars: []rune{'-'},
+ Ranges: []rune{'\u0012', '\U00001234'},
+ },
+ },
+ },
+ },
+ `a = [\p{Latin}]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ UnicodeClasses: []string{"Latin"},
+ },
+ },
+ },
+ },
+ `a = [\p{Latin}\pZ]`: &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: &ast.CharClassMatcher{
+ UnicodeClasses: []string{"Latin", "Z"},
+ },
+ },
+ },
+ },
+ "a = `a\nb\nc`": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: ast.NewLitMatcher(ast.Pos{}, "a\nb\nc"),
+ },
+ },
+ },
+ "a = ``": &ast.Grammar{
+ Rules: []*ast.Rule{
+ {
+ Name: ast.NewIdentifier(ast.Pos{}, "a"),
+ Expr: ast.NewLitMatcher(ast.Pos{}, ""),
+ },
+ },
+ },
+}
+
+// TestValidParseCases parses every valid grammar fixture and compares
+// the resulting AST against the expected grammar, once without and
+// once with memoization.
+//
+// Fix: the memo flag is now actually passed to Parse via
+// Memoize(memo); previously the second pass re-ran the identical
+// non-memoized parse, so memoization was never exercised here (compare
+// TestInvalidParseCases, which does pass Memoize(memo)).
+func TestValidParseCases(t *testing.T) {
+ memo := false
+again:
+ for tc, exp := range validParseCases {
+ got, err := Parse("", []byte(tc), Memoize(memo))
+ if err != nil {
+ t.Errorf("%q: got error %v", tc, err)
+ continue
+ }
+ gotg, ok := got.(*ast.Grammar)
+ if !ok {
+ t.Errorf("%q: want grammar type %T, got %T", tc, exp, got)
+ continue
+ }
+ compareGrammars(t, tc, exp, gotg)
+ }
+ if !memo {
+ memo = true
+ goto again
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/pigeon.go b/vendor/github.com/PuerkitoBio/pigeon/pigeon.go
new file mode 100644
index 0000000000..a783582d5e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/pigeon.go
@@ -0,0 +1,3698 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/PuerkitoBio/pigeon/ast"
+)
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "Grammar",
+ pos: position{line: 5, col: 1, offset: 18},
+ expr: &actionExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ run: (*parser).callonGrammar1,
+ expr: &seqExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 11, offset: 30},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 5, col: 14, offset: 33},
+ label: "initializer",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 5, col: 26, offset: 45},
+ expr: &seqExpr{
+ pos: position{line: 5, col: 28, offset: 47},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 28, offset: 47},
+ name: "Initializer",
+ },
+ &ruleRefExpr{
+ pos: position{line: 5, col: 40, offset: 59},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 5, col: 46, offset: 65},
+ label: "rules",
+ expr: &oneOrMoreExpr{
+ pos: position{line: 5, col: 52, offset: 71},
+ expr: &seqExpr{
+ pos: position{line: 5, col: 54, offset: 73},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 5, col: 54, offset: 73},
+ name: "Rule",
+ },
+ &ruleRefExpr{
+ pos: position{line: 5, col: 59, offset: 78},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 5, col: 65, offset: 84},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Initializer",
+ pos: position{line: 24, col: 1, offset: 525},
+ expr: &actionExpr{
+ pos: position{line: 24, col: 15, offset: 541},
+ run: (*parser).callonInitializer1,
+ expr: &seqExpr{
+ pos: position{line: 24, col: 15, offset: 541},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 24, col: 15, offset: 541},
+ label: "code",
+ expr: &ruleRefExpr{
+ pos: position{line: 24, col: 20, offset: 546},
+ name: "CodeBlock",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 24, col: 30, offset: 556},
+ name: "EOS",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Rule",
+ pos: position{line: 28, col: 1, offset: 586},
+ expr: &actionExpr{
+ pos: position{line: 28, col: 8, offset: 595},
+ run: (*parser).callonRule1,
+ expr: &seqExpr{
+ pos: position{line: 28, col: 8, offset: 595},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 28, col: 8, offset: 595},
+ label: "name",
+ expr: &ruleRefExpr{
+ pos: position{line: 28, col: 13, offset: 600},
+ name: "IdentifierName",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 28, offset: 615},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 28, col: 31, offset: 618},
+ label: "display",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 28, col: 39, offset: 626},
+ expr: &seqExpr{
+ pos: position{line: 28, col: 41, offset: 628},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 28, col: 41, offset: 628},
+ name: "StringLiteral",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 55, offset: 642},
+ name: "__",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 61, offset: 648},
+ name: "RuleDefOp",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 71, offset: 658},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 28, col: 74, offset: 661},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 28, col: 79, offset: 666},
+ name: "Expression",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 90, offset: 677},
+ name: "EOS",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Expression",
+ pos: position{line: 41, col: 1, offset: 961},
+ expr: &ruleRefExpr{
+ pos: position{line: 41, col: 14, offset: 976},
+ name: "ChoiceExpr",
+ },
+ },
+ {
+ name: "ChoiceExpr",
+ pos: position{line: 43, col: 1, offset: 988},
+ expr: &actionExpr{
+ pos: position{line: 43, col: 14, offset: 1003},
+ run: (*parser).callonChoiceExpr1,
+ expr: &seqExpr{
+ pos: position{line: 43, col: 14, offset: 1003},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 43, col: 14, offset: 1003},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 43, col: 20, offset: 1009},
+ name: "ActionExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 43, col: 31, offset: 1020},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 43, col: 36, offset: 1025},
+ expr: &seqExpr{
+ pos: position{line: 43, col: 38, offset: 1027},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 43, col: 38, offset: 1027},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 43, col: 41, offset: 1030},
+ val: "/",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 43, col: 45, offset: 1034},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 43, col: 48, offset: 1037},
+ name: "ActionExpr",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ActionExpr",
+ pos: position{line: 58, col: 1, offset: 1442},
+ expr: &actionExpr{
+ pos: position{line: 58, col: 14, offset: 1457},
+ run: (*parser).callonActionExpr1,
+ expr: &seqExpr{
+ pos: position{line: 58, col: 14, offset: 1457},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 58, col: 14, offset: 1457},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 58, col: 19, offset: 1462},
+ name: "SeqExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 58, col: 27, offset: 1470},
+ label: "code",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 58, col: 32, offset: 1475},
+ expr: &seqExpr{
+ pos: position{line: 58, col: 34, offset: 1477},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 58, col: 34, offset: 1477},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 58, col: 37, offset: 1480},
+ name: "CodeBlock",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SeqExpr",
+ pos: position{line: 72, col: 1, offset: 1746},
+ expr: &actionExpr{
+ pos: position{line: 72, col: 11, offset: 1758},
+ run: (*parser).callonSeqExpr1,
+ expr: &seqExpr{
+ pos: position{line: 72, col: 11, offset: 1758},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 72, col: 11, offset: 1758},
+ label: "first",
+ expr: &ruleRefExpr{
+ pos: position{line: 72, col: 17, offset: 1764},
+ name: "LabeledExpr",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 72, col: 29, offset: 1776},
+ label: "rest",
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 72, col: 34, offset: 1781},
+ expr: &seqExpr{
+ pos: position{line: 72, col: 36, offset: 1783},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 72, col: 36, offset: 1783},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 72, col: 39, offset: 1786},
+ name: "LabeledExpr",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "LabeledExpr",
+ pos: position{line: 85, col: 1, offset: 2137},
+ expr: &choiceExpr{
+ pos: position{line: 85, col: 15, offset: 2153},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 85, col: 15, offset: 2153},
+ run: (*parser).callonLabeledExpr2,
+ expr: &seqExpr{
+ pos: position{line: 85, col: 15, offset: 2153},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 85, col: 15, offset: 2153},
+ label: "label",
+ expr: &ruleRefExpr{
+ pos: position{line: 85, col: 21, offset: 2159},
+ name: "Identifier",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 85, col: 32, offset: 2170},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 85, col: 35, offset: 2173},
+ val: ":",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 85, col: 39, offset: 2177},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 85, col: 42, offset: 2180},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 85, col: 47, offset: 2185},
+ name: "PrefixedExpr",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 91, col: 5, offset: 2358},
+ name: "PrefixedExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "PrefixedExpr",
+ pos: position{line: 93, col: 1, offset: 2372},
+ expr: &choiceExpr{
+ pos: position{line: 93, col: 16, offset: 2389},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 93, col: 16, offset: 2389},
+ run: (*parser).callonPrefixedExpr2,
+ expr: &seqExpr{
+ pos: position{line: 93, col: 16, offset: 2389},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 93, col: 16, offset: 2389},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 93, col: 19, offset: 2392},
+ name: "PrefixedOp",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 93, col: 30, offset: 2403},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 93, col: 33, offset: 2406},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 93, col: 38, offset: 2411},
+ name: "SuffixedExpr",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 104, col: 5, offset: 2693},
+ name: "SuffixedExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "PrefixedOp",
+ pos: position{line: 106, col: 1, offset: 2707},
+ expr: &actionExpr{
+ pos: position{line: 106, col: 14, offset: 2722},
+ run: (*parser).callonPrefixedOp1,
+ expr: &choiceExpr{
+ pos: position{line: 106, col: 16, offset: 2724},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 106, col: 16, offset: 2724},
+ val: "&",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 106, col: 22, offset: 2730},
+ val: "!",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SuffixedExpr",
+ pos: position{line: 110, col: 1, offset: 2772},
+ expr: &choiceExpr{
+ pos: position{line: 110, col: 16, offset: 2789},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 110, col: 16, offset: 2789},
+ run: (*parser).callonSuffixedExpr2,
+ expr: &seqExpr{
+ pos: position{line: 110, col: 16, offset: 2789},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 110, col: 16, offset: 2789},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 110, col: 21, offset: 2794},
+ name: "PrimaryExpr",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 110, col: 33, offset: 2806},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 110, col: 36, offset: 2809},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 110, col: 39, offset: 2812},
+ name: "SuffixedOp",
+ },
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 129, col: 5, offset: 3342},
+ name: "PrimaryExpr",
+ },
+ },
+ },
+ },
+ {
+ name: "SuffixedOp",
+ pos: position{line: 131, col: 1, offset: 3356},
+ expr: &actionExpr{
+ pos: position{line: 131, col: 14, offset: 3371},
+ run: (*parser).callonSuffixedOp1,
+ expr: &choiceExpr{
+ pos: position{line: 131, col: 16, offset: 3373},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 131, col: 16, offset: 3373},
+ val: "?",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 131, col: 22, offset: 3379},
+ val: "*",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 131, col: 28, offset: 3385},
+ val: "+",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "PrimaryExpr",
+ pos: position{line: 135, col: 1, offset: 3427},
+ expr: &choiceExpr{
+ pos: position{line: 135, col: 15, offset: 3443},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 135, col: 15, offset: 3443},
+ name: "LitMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 28, offset: 3456},
+ name: "CharClassMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 47, offset: 3475},
+ name: "AnyMatcher",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 60, offset: 3488},
+ name: "RuleRefExpr",
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 74, offset: 3502},
+ name: "SemanticPredExpr",
+ },
+ &actionExpr{
+ pos: position{line: 135, col: 93, offset: 3521},
+ run: (*parser).callonPrimaryExpr7,
+ expr: &seqExpr{
+ pos: position{line: 135, col: 93, offset: 3521},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 135, col: 93, offset: 3521},
+ val: "(",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 97, offset: 3525},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 135, col: 100, offset: 3528},
+ label: "expr",
+ expr: &ruleRefExpr{
+ pos: position{line: 135, col: 105, offset: 3533},
+ name: "Expression",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 135, col: 116, offset: 3544},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 135, col: 119, offset: 3547},
+ val: ")",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RuleRefExpr",
+ pos: position{line: 138, col: 1, offset: 3576},
+ expr: &actionExpr{
+ pos: position{line: 138, col: 15, offset: 3592},
+ run: (*parser).callonRuleRefExpr1,
+ expr: &seqExpr{
+ pos: position{line: 138, col: 15, offset: 3592},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 138, col: 15, offset: 3592},
+ label: "name",
+ expr: &ruleRefExpr{
+ pos: position{line: 138, col: 20, offset: 3597},
+ name: "IdentifierName",
+ },
+ },
+ ¬Expr{
+ pos: position{line: 138, col: 35, offset: 3612},
+ expr: &seqExpr{
+ pos: position{line: 138, col: 38, offset: 3615},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 138, col: 38, offset: 3615},
+ name: "__",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 138, col: 41, offset: 3618},
+ expr: &seqExpr{
+ pos: position{line: 138, col: 43, offset: 3620},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 138, col: 43, offset: 3620},
+ name: "StringLiteral",
+ },
+ &ruleRefExpr{
+ pos: position{line: 138, col: 57, offset: 3634},
+ name: "__",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 138, col: 63, offset: 3640},
+ name: "RuleDefOp",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SemanticPredExpr",
+ pos: position{line: 143, col: 1, offset: 3756},
+ expr: &actionExpr{
+ pos: position{line: 143, col: 20, offset: 3777},
+ run: (*parser).callonSemanticPredExpr1,
+ expr: &seqExpr{
+ pos: position{line: 143, col: 20, offset: 3777},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 143, col: 20, offset: 3777},
+ label: "op",
+ expr: &ruleRefExpr{
+ pos: position{line: 143, col: 23, offset: 3780},
+ name: "SemanticPredOp",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 143, col: 38, offset: 3795},
+ name: "__",
+ },
+ &labeledExpr{
+ pos: position{line: 143, col: 41, offset: 3798},
+ label: "code",
+ expr: &ruleRefExpr{
+ pos: position{line: 143, col: 46, offset: 3803},
+ name: "CodeBlock",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SemanticPredOp",
+ pos: position{line: 154, col: 1, offset: 4080},
+ expr: &actionExpr{
+ pos: position{line: 154, col: 18, offset: 4099},
+ run: (*parser).callonSemanticPredOp1,
+ expr: &choiceExpr{
+ pos: position{line: 154, col: 20, offset: 4101},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 154, col: 20, offset: 4101},
+ val: "&",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 154, col: 26, offset: 4107},
+ val: "!",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RuleDefOp",
+ pos: position{line: 158, col: 1, offset: 4149},
+ expr: &choiceExpr{
+ pos: position{line: 158, col: 13, offset: 4163},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 158, col: 13, offset: 4163},
+ val: "=",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 19, offset: 4169},
+ val: "<-",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 26, offset: 4176},
+ val: "←",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 158, col: 37, offset: 4187},
+ val: "⟵",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "SourceChar",
+ pos: position{line: 160, col: 1, offset: 4197},
+ expr: &anyMatcher{
+ line: 160, col: 14, offset: 4212,
+ },
+ },
+ {
+ name: "Comment",
+ pos: position{line: 161, col: 1, offset: 4214},
+ expr: &choiceExpr{
+ pos: position{line: 161, col: 11, offset: 4226},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 161, col: 11, offset: 4226},
+ name: "MultiLineComment",
+ },
+ &ruleRefExpr{
+ pos: position{line: 161, col: 30, offset: 4245},
+ name: "SingleLineComment",
+ },
+ },
+ },
+ },
+ {
+ name: "MultiLineComment",
+ pos: position{line: 162, col: 1, offset: 4263},
+ expr: &seqExpr{
+ pos: position{line: 162, col: 20, offset: 4284},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 162, col: 20, offset: 4284},
+ val: "/*",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 162, col: 25, offset: 4289},
+ expr: &seqExpr{
+ pos: position{line: 162, col: 27, offset: 4291},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 162, col: 27, offset: 4291},
+ expr: &litMatcher{
+ pos: position{line: 162, col: 28, offset: 4292},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 162, col: 33, offset: 4297},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 162, col: 47, offset: 4311},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "MultiLineCommentNoLineTerminator",
+ pos: position{line: 163, col: 1, offset: 4316},
+ expr: &seqExpr{
+ pos: position{line: 163, col: 36, offset: 4353},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 163, col: 36, offset: 4353},
+ val: "/*",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 163, col: 41, offset: 4358},
+ expr: &seqExpr{
+ pos: position{line: 163, col: 43, offset: 4360},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 163, col: 43, offset: 4360},
+ expr: &choiceExpr{
+ pos: position{line: 163, col: 46, offset: 4363},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 163, col: 46, offset: 4363},
+ val: "*/",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 163, col: 53, offset: 4370},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 163, col: 59, offset: 4376},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 163, col: 73, offset: 4390},
+ val: "*/",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "SingleLineComment",
+ pos: position{line: 164, col: 1, offset: 4395},
+ expr: &seqExpr{
+ pos: position{line: 164, col: 21, offset: 4417},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 164, col: 21, offset: 4417},
+ val: "//",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 164, col: 26, offset: 4422},
+ expr: &seqExpr{
+ pos: position{line: 164, col: 28, offset: 4424},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 164, col: 28, offset: 4424},
+ expr: &ruleRefExpr{
+ pos: position{line: 164, col: 29, offset: 4425},
+ name: "EOL",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 164, col: 33, offset: 4429},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Identifier",
+ pos: position{line: 166, col: 1, offset: 4444},
+ expr: &actionExpr{
+ pos: position{line: 166, col: 14, offset: 4459},
+ run: (*parser).callonIdentifier1,
+ expr: &labeledExpr{
+ pos: position{line: 166, col: 14, offset: 4459},
+ label: "ident",
+ expr: &ruleRefExpr{
+ pos: position{line: 166, col: 20, offset: 4465},
+ name: "IdentifierName",
+ },
+ },
+ },
+ },
+ {
+ name: "IdentifierName",
+ pos: position{line: 174, col: 1, offset: 4684},
+ expr: &actionExpr{
+ pos: position{line: 174, col: 18, offset: 4703},
+ run: (*parser).callonIdentifierName1,
+ expr: &seqExpr{
+ pos: position{line: 174, col: 18, offset: 4703},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 174, col: 18, offset: 4703},
+ name: "IdentifierStart",
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 174, col: 34, offset: 4719},
+ expr: &ruleRefExpr{
+ pos: position{line: 174, col: 34, offset: 4719},
+ name: "IdentifierPart",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "IdentifierStart",
+ pos: position{line: 177, col: 1, offset: 4801},
+ expr: &charClassMatcher{
+ pos: position{line: 177, col: 19, offset: 4821},
+ val: "[\\pL_]",
+ chars: []rune{'_'},
+ classes: []*unicode.RangeTable{rangeTable("L")},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "IdentifierPart",
+ pos: position{line: 178, col: 1, offset: 4828},
+ expr: &choiceExpr{
+ pos: position{line: 178, col: 18, offset: 4847},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 178, col: 18, offset: 4847},
+ name: "IdentifierStart",
+ },
+ &charClassMatcher{
+ pos: position{line: 178, col: 36, offset: 4865},
+ val: "[\\p{Nd}]",
+ classes: []*unicode.RangeTable{rangeTable("Nd")},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ },
+ {
+ name: "LitMatcher",
+ pos: position{line: 180, col: 1, offset: 4875},
+ expr: &actionExpr{
+ pos: position{line: 180, col: 14, offset: 4890},
+ run: (*parser).callonLitMatcher1,
+ expr: &seqExpr{
+ pos: position{line: 180, col: 14, offset: 4890},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 180, col: 14, offset: 4890},
+ label: "lit",
+ expr: &ruleRefExpr{
+ pos: position{line: 180, col: 18, offset: 4894},
+ name: "StringLiteral",
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 180, col: 32, offset: 4908},
+ label: "ignore",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 180, col: 39, offset: 4915},
+ expr: &litMatcher{
+ pos: position{line: 180, col: 39, offset: 4915},
+ val: "i",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "StringLiteral",
+ pos: position{line: 193, col: 1, offset: 5314},
+ expr: &choiceExpr{
+ pos: position{line: 193, col: 17, offset: 5332},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 193, col: 17, offset: 5332},
+ run: (*parser).callonStringLiteral2,
+ expr: &choiceExpr{
+ pos: position{line: 193, col: 19, offset: 5334},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 193, col: 19, offset: 5334},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 193, col: 19, offset: 5334},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 193, col: 23, offset: 5338},
+ expr: &ruleRefExpr{
+ pos: position{line: 193, col: 23, offset: 5338},
+ name: "DoubleStringChar",
+ },
+ },
+ &litMatcher{
+ pos: position{line: 193, col: 41, offset: 5356},
+ val: "\"",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 193, col: 47, offset: 5362},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 193, col: 47, offset: 5362},
+ val: "'",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 193, col: 51, offset: 5366},
+ name: "SingleStringChar",
+ },
+ &litMatcher{
+ pos: position{line: 193, col: 68, offset: 5383},
+ val: "'",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 193, col: 74, offset: 5389},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 193, col: 74, offset: 5389},
+ val: "`",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 193, col: 78, offset: 5393},
+ expr: &ruleRefExpr{
+ pos: position{line: 193, col: 78, offset: 5393},
+ name: "RawStringChar",
+ },
+ },
+ &litMatcher{
+ pos: position{line: 193, col: 93, offset: 5408},
+ val: "`",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 195, col: 5, offset: 5481},
+ run: (*parser).callonStringLiteral18,
+ expr: &choiceExpr{
+ pos: position{line: 195, col: 7, offset: 5483},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 195, col: 9, offset: 5485},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 195, col: 9, offset: 5485},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 195, col: 13, offset: 5489},
+ expr: &ruleRefExpr{
+ pos: position{line: 195, col: 13, offset: 5489},
+ name: "DoubleStringChar",
+ },
+ },
+ &choiceExpr{
+ pos: position{line: 195, col: 33, offset: 5509},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 195, col: 33, offset: 5509},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 195, col: 39, offset: 5515},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 195, col: 51, offset: 5527},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 195, col: 51, offset: 5527},
+ val: "'",
+ ignoreCase: false,
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 195, col: 55, offset: 5531},
+ expr: &ruleRefExpr{
+ pos: position{line: 195, col: 55, offset: 5531},
+ name: "SingleStringChar",
+ },
+ },
+ &choiceExpr{
+ pos: position{line: 195, col: 75, offset: 5551},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 195, col: 75, offset: 5551},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 195, col: 81, offset: 5557},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 195, col: 91, offset: 5567},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 195, col: 91, offset: 5567},
+ val: "`",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 195, col: 95, offset: 5571},
+ expr: &ruleRefExpr{
+ pos: position{line: 195, col: 95, offset: 5571},
+ name: "RawStringChar",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 195, col: 110, offset: 5586},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "DoubleStringChar",
+ pos: position{line: 199, col: 1, offset: 5688},
+ expr: &choiceExpr{
+ pos: position{line: 199, col: 20, offset: 5709},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 199, col: 20, offset: 5709},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 199, col: 20, offset: 5709},
+ expr: &choiceExpr{
+ pos: position{line: 199, col: 23, offset: 5712},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 199, col: 23, offset: 5712},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 199, col: 29, offset: 5718},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 199, col: 36, offset: 5725},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 199, col: 42, offset: 5731},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 199, col: 55, offset: 5744},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 199, col: 55, offset: 5744},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 199, col: 60, offset: 5749},
+ name: "DoubleStringEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SingleStringChar",
+ pos: position{line: 200, col: 1, offset: 5768},
+ expr: &choiceExpr{
+ pos: position{line: 200, col: 20, offset: 5789},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 200, col: 20, offset: 5789},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 200, col: 20, offset: 5789},
+ expr: &choiceExpr{
+ pos: position{line: 200, col: 23, offset: 5792},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 200, col: 23, offset: 5792},
+ val: "'",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 200, col: 29, offset: 5798},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 200, col: 36, offset: 5805},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 200, col: 42, offset: 5811},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 200, col: 55, offset: 5824},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 200, col: 55, offset: 5824},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 200, col: 60, offset: 5829},
+ name: "SingleStringEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "RawStringChar",
+ pos: position{line: 201, col: 1, offset: 5848},
+ expr: &seqExpr{
+ pos: position{line: 201, col: 17, offset: 5866},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 201, col: 17, offset: 5866},
+ expr: &litMatcher{
+ pos: position{line: 201, col: 18, offset: 5867},
+ val: "`",
+ ignoreCase: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 201, col: 22, offset: 5871},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ {
+ name: "DoubleStringEscape",
+ pos: position{line: 203, col: 1, offset: 5883},
+ expr: &choiceExpr{
+ pos: position{line: 203, col: 22, offset: 5906},
+ alternatives: []interface{}{
+ &choiceExpr{
+ pos: position{line: 203, col: 24, offset: 5908},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 203, col: 24, offset: 5908},
+ val: "\"",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 203, col: 30, offset: 5914},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 204, col: 7, offset: 5943},
+ run: (*parser).callonDoubleStringEscape5,
+ expr: &choiceExpr{
+ pos: position{line: 204, col: 9, offset: 5945},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 204, col: 9, offset: 5945},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 204, col: 22, offset: 5958},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 204, col: 28, offset: 5964},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SingleStringEscape",
+ pos: position{line: 207, col: 1, offset: 6029},
+ expr: &choiceExpr{
+ pos: position{line: 207, col: 22, offset: 6052},
+ alternatives: []interface{}{
+ &choiceExpr{
+ pos: position{line: 207, col: 24, offset: 6054},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 207, col: 24, offset: 6054},
+ val: "'",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 207, col: 30, offset: 6060},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 208, col: 7, offset: 6089},
+ run: (*parser).callonSingleStringEscape5,
+ expr: &choiceExpr{
+ pos: position{line: 208, col: 9, offset: 6091},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 208, col: 9, offset: 6091},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 208, col: 22, offset: 6104},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 208, col: 28, offset: 6110},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "CommonEscapeSequence",
+ pos: position{line: 212, col: 1, offset: 6176},
+ expr: &choiceExpr{
+ pos: position{line: 212, col: 24, offset: 6201},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 212, col: 24, offset: 6201},
+ name: "SingleCharEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 212, col: 43, offset: 6220},
+ name: "OctalEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 212, col: 57, offset: 6234},
+ name: "HexEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 212, col: 69, offset: 6246},
+ name: "LongUnicodeEscape",
+ },
+ &ruleRefExpr{
+ pos: position{line: 212, col: 89, offset: 6266},
+ name: "ShortUnicodeEscape",
+ },
+ },
+ },
+ },
+ {
+ name: "SingleCharEscape",
+ pos: position{line: 213, col: 1, offset: 6285},
+ expr: &choiceExpr{
+ pos: position{line: 213, col: 20, offset: 6306},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 213, col: 20, offset: 6306},
+ val: "a",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 26, offset: 6312},
+ val: "b",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 32, offset: 6318},
+ val: "n",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 38, offset: 6324},
+ val: "f",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 44, offset: 6330},
+ val: "r",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 50, offset: 6336},
+ val: "t",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 56, offset: 6342},
+ val: "v",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 213, col: 62, offset: 6348},
+ val: "\\",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ {
+ name: "OctalEscape",
+ pos: position{line: 214, col: 1, offset: 6353},
+ expr: &choiceExpr{
+ pos: position{line: 214, col: 15, offset: 6369},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 214, col: 15, offset: 6369},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 214, col: 15, offset: 6369},
+ name: "OctalDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 214, col: 26, offset: 6380},
+ name: "OctalDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 214, col: 37, offset: 6391},
+ name: "OctalDigit",
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 215, col: 7, offset: 6408},
+ run: (*parser).callonOctalEscape6,
+ expr: &seqExpr{
+ pos: position{line: 215, col: 7, offset: 6408},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 215, col: 7, offset: 6408},
+ name: "OctalDigit",
+ },
+ &choiceExpr{
+ pos: position{line: 215, col: 20, offset: 6421},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 215, col: 20, offset: 6421},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 215, col: 33, offset: 6434},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 215, col: 39, offset: 6440},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "HexEscape",
+ pos: position{line: 218, col: 1, offset: 6501},
+ expr: &choiceExpr{
+ pos: position{line: 218, col: 13, offset: 6515},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 218, col: 13, offset: 6515},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 218, col: 13, offset: 6515},
+ val: "x",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 218, col: 17, offset: 6519},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 218, col: 26, offset: 6528},
+ name: "HexDigit",
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 219, col: 7, offset: 6543},
+ run: (*parser).callonHexEscape6,
+ expr: &seqExpr{
+ pos: position{line: 219, col: 7, offset: 6543},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 219, col: 7, offset: 6543},
+ val: "x",
+ ignoreCase: false,
+ },
+ &choiceExpr{
+ pos: position{line: 219, col: 13, offset: 6549},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 219, col: 13, offset: 6549},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 219, col: 26, offset: 6562},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 219, col: 32, offset: 6568},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "LongUnicodeEscape",
+ pos: position{line: 222, col: 1, offset: 6635},
+ expr: &choiceExpr{
+ pos: position{line: 223, col: 5, offset: 6662},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 223, col: 5, offset: 6662},
+ run: (*parser).callonLongUnicodeEscape2,
+ expr: &seqExpr{
+ pos: position{line: 223, col: 5, offset: 6662},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 223, col: 5, offset: 6662},
+ val: "U",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 9, offset: 6666},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 18, offset: 6675},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 27, offset: 6684},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 36, offset: 6693},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 45, offset: 6702},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 54, offset: 6711},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 63, offset: 6720},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 223, col: 72, offset: 6729},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 226, col: 7, offset: 6831},
+ run: (*parser).callonLongUnicodeEscape13,
+ expr: &seqExpr{
+ pos: position{line: 226, col: 7, offset: 6831},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 226, col: 7, offset: 6831},
+ val: "U",
+ ignoreCase: false,
+ },
+ &choiceExpr{
+ pos: position{line: 226, col: 13, offset: 6837},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 226, col: 13, offset: 6837},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 226, col: 26, offset: 6850},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 226, col: 32, offset: 6856},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ShortUnicodeEscape",
+ pos: position{line: 229, col: 1, offset: 6919},
+ expr: &choiceExpr{
+ pos: position{line: 230, col: 5, offset: 6947},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 230, col: 5, offset: 6947},
+ run: (*parser).callonShortUnicodeEscape2,
+ expr: &seqExpr{
+ pos: position{line: 230, col: 5, offset: 6947},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 230, col: 5, offset: 6947},
+ val: "u",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 9, offset: 6951},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 18, offset: 6960},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 27, offset: 6969},
+ name: "HexDigit",
+ },
+ &ruleRefExpr{
+ pos: position{line: 230, col: 36, offset: 6978},
+ name: "HexDigit",
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 233, col: 7, offset: 7080},
+ run: (*parser).callonShortUnicodeEscape9,
+ expr: &seqExpr{
+ pos: position{line: 233, col: 7, offset: 7080},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 233, col: 7, offset: 7080},
+ val: "u",
+ ignoreCase: false,
+ },
+ &choiceExpr{
+ pos: position{line: 233, col: 13, offset: 7086},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 233, col: 13, offset: 7086},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 233, col: 26, offset: 7099},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 233, col: 32, offset: 7105},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "OctalDigit",
+ pos: position{line: 237, col: 1, offset: 7169},
+ expr: &charClassMatcher{
+ pos: position{line: 237, col: 14, offset: 7184},
+ val: "[0-7]",
+ ranges: []rune{'0', '7'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "DecimalDigit",
+ pos: position{line: 238, col: 1, offset: 7190},
+ expr: &charClassMatcher{
+ pos: position{line: 238, col: 16, offset: 7207},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "HexDigit",
+ pos: position{line: 239, col: 1, offset: 7213},
+ expr: &charClassMatcher{
+ pos: position{line: 239, col: 12, offset: 7226},
+ val: "[0-9a-f]i",
+ ranges: []rune{'0', '9', 'a', 'f'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ {
+ name: "CharClassMatcher",
+ pos: position{line: 241, col: 1, offset: 7237},
+ expr: &choiceExpr{
+ pos: position{line: 241, col: 20, offset: 7258},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 241, col: 20, offset: 7258},
+ run: (*parser).callonCharClassMatcher2,
+ expr: &seqExpr{
+ pos: position{line: 241, col: 20, offset: 7258},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 241, col: 20, offset: 7258},
+ val: "[",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 241, col: 24, offset: 7262},
+ expr: &choiceExpr{
+ pos: position{line: 241, col: 26, offset: 7264},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 241, col: 26, offset: 7264},
+ name: "ClassCharRange",
+ },
+ &ruleRefExpr{
+ pos: position{line: 241, col: 43, offset: 7281},
+ name: "ClassChar",
+ },
+ &seqExpr{
+ pos: position{line: 241, col: 55, offset: 7293},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 241, col: 55, offset: 7293},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 241, col: 60, offset: 7298},
+ name: "UnicodeClassEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ &litMatcher{
+ pos: position{line: 241, col: 82, offset: 7320},
+ val: "]",
+ ignoreCase: false,
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 241, col: 86, offset: 7324},
+ expr: &litMatcher{
+ pos: position{line: 241, col: 86, offset: 7324},
+ val: "i",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 245, col: 5, offset: 7431},
+ run: (*parser).callonCharClassMatcher15,
+ expr: &seqExpr{
+ pos: position{line: 245, col: 5, offset: 7431},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 245, col: 5, offset: 7431},
+ val: "[",
+ ignoreCase: false,
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 245, col: 9, offset: 7435},
+ expr: &seqExpr{
+ pos: position{line: 245, col: 11, offset: 7437},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 245, col: 11, offset: 7437},
+ expr: &ruleRefExpr{
+ pos: position{line: 245, col: 14, offset: 7440},
+ name: "EOL",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 245, col: 20, offset: 7446},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &choiceExpr{
+ pos: position{line: 245, col: 36, offset: 7462},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 245, col: 36, offset: 7462},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 245, col: 42, offset: 7468},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ClassCharRange",
+ pos: position{line: 249, col: 1, offset: 7578},
+ expr: &seqExpr{
+ pos: position{line: 249, col: 18, offset: 7597},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 249, col: 18, offset: 7597},
+ name: "ClassChar",
+ },
+ &litMatcher{
+ pos: position{line: 249, col: 28, offset: 7607},
+ val: "-",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 249, col: 32, offset: 7611},
+ name: "ClassChar",
+ },
+ },
+ },
+ },
+ {
+ name: "ClassChar",
+ pos: position{line: 250, col: 1, offset: 7621},
+ expr: &choiceExpr{
+ pos: position{line: 250, col: 13, offset: 7635},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 250, col: 13, offset: 7635},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 250, col: 13, offset: 7635},
+ expr: &choiceExpr{
+ pos: position{line: 250, col: 16, offset: 7638},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 250, col: 16, offset: 7638},
+ val: "]",
+ ignoreCase: false,
+ },
+ &litMatcher{
+ pos: position{line: 250, col: 22, offset: 7644},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 250, col: 29, offset: 7651},
+ name: "EOL",
+ },
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 250, col: 35, offset: 7657},
+ name: "SourceChar",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 250, col: 48, offset: 7670},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 250, col: 48, offset: 7670},
+ val: "\\",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 250, col: 53, offset: 7675},
+ name: "CharClassEscape",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "CharClassEscape",
+ pos: position{line: 251, col: 1, offset: 7691},
+ expr: &choiceExpr{
+ pos: position{line: 251, col: 19, offset: 7711},
+ alternatives: []interface{}{
+ &choiceExpr{
+ pos: position{line: 251, col: 21, offset: 7713},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 251, col: 21, offset: 7713},
+ val: "]",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 251, col: 27, offset: 7719},
+ name: "CommonEscapeSequence",
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 252, col: 7, offset: 7748},
+ run: (*parser).callonCharClassEscape5,
+ expr: &seqExpr{
+ pos: position{line: 252, col: 7, offset: 7748},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 252, col: 7, offset: 7748},
+ expr: &litMatcher{
+ pos: position{line: 252, col: 8, offset: 7749},
+ val: "p",
+ ignoreCase: false,
+ },
+ },
+ &choiceExpr{
+ pos: position{line: 252, col: 14, offset: 7755},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 252, col: 14, offset: 7755},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 252, col: 27, offset: 7768},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 252, col: 33, offset: 7774},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "UnicodeClassEscape",
+ pos: position{line: 256, col: 1, offset: 7840},
+ expr: &seqExpr{
+ pos: position{line: 256, col: 22, offset: 7863},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 256, col: 22, offset: 7863},
+ val: "p",
+ ignoreCase: false,
+ },
+ &choiceExpr{
+ pos: position{line: 257, col: 7, offset: 7876},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 257, col: 7, offset: 7876},
+ name: "SingleCharUnicodeClass",
+ },
+ &actionExpr{
+ pos: position{line: 258, col: 7, offset: 7905},
+ run: (*parser).callonUnicodeClassEscape5,
+ expr: &seqExpr{
+ pos: position{line: 258, col: 7, offset: 7905},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 258, col: 7, offset: 7905},
+ expr: &litMatcher{
+ pos: position{line: 258, col: 8, offset: 7906},
+ val: "{",
+ ignoreCase: false,
+ },
+ },
+ &choiceExpr{
+ pos: position{line: 258, col: 14, offset: 7912},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 258, col: 14, offset: 7912},
+ name: "SourceChar",
+ },
+ &ruleRefExpr{
+ pos: position{line: 258, col: 27, offset: 7925},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 258, col: 33, offset: 7931},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 259, col: 7, offset: 8002},
+ run: (*parser).callonUnicodeClassEscape13,
+ expr: &seqExpr{
+ pos: position{line: 259, col: 7, offset: 8002},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 259, col: 7, offset: 8002},
+ val: "{",
+ ignoreCase: false,
+ },
+ &labeledExpr{
+ pos: position{line: 259, col: 11, offset: 8006},
+ label: "ident",
+ expr: &ruleRefExpr{
+ pos: position{line: 259, col: 17, offset: 8012},
+ name: "IdentifierName",
+ },
+ },
+ &litMatcher{
+ pos: position{line: 259, col: 32, offset: 8027},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 265, col: 7, offset: 8204},
+ run: (*parser).callonUnicodeClassEscape19,
+ expr: &seqExpr{
+ pos: position{line: 265, col: 7, offset: 8204},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 265, col: 7, offset: 8204},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 265, col: 11, offset: 8208},
+ name: "IdentifierName",
+ },
+ &choiceExpr{
+ pos: position{line: 265, col: 28, offset: 8225},
+ alternatives: []interface{}{
+ &litMatcher{
+ pos: position{line: 265, col: 28, offset: 8225},
+ val: "]",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 265, col: 34, offset: 8231},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 265, col: 40, offset: 8237},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "SingleCharUnicodeClass",
+ pos: position{line: 269, col: 1, offset: 8320},
+ expr: &charClassMatcher{
+ pos: position{line: 269, col: 26, offset: 8347},
+ val: "[LMNCPZS]",
+ chars: []rune{'L', 'M', 'N', 'C', 'P', 'Z', 'S'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "AnyMatcher",
+ pos: position{line: 271, col: 1, offset: 8358},
+ expr: &actionExpr{
+ pos: position{line: 271, col: 14, offset: 8373},
+ run: (*parser).callonAnyMatcher1,
+ expr: &litMatcher{
+ pos: position{line: 271, col: 14, offset: 8373},
+ val: ".",
+ ignoreCase: false,
+ },
+ },
+ },
+ {
+ name: "CodeBlock",
+ pos: position{line: 276, col: 1, offset: 8448},
+ expr: &choiceExpr{
+ pos: position{line: 276, col: 13, offset: 8462},
+ alternatives: []interface{}{
+ &actionExpr{
+ pos: position{line: 276, col: 13, offset: 8462},
+ run: (*parser).callonCodeBlock2,
+ expr: &seqExpr{
+ pos: position{line: 276, col: 13, offset: 8462},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 276, col: 13, offset: 8462},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 276, col: 17, offset: 8466},
+ name: "Code",
+ },
+ &litMatcher{
+ pos: position{line: 276, col: 22, offset: 8471},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ &actionExpr{
+ pos: position{line: 280, col: 5, offset: 8570},
+ run: (*parser).callonCodeBlock7,
+ expr: &seqExpr{
+ pos: position{line: 280, col: 5, offset: 8570},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 280, col: 5, offset: 8570},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 280, col: 9, offset: 8574},
+ name: "Code",
+ },
+ &ruleRefExpr{
+ pos: position{line: 280, col: 14, offset: 8579},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Code",
+ pos: position{line: 284, col: 1, offset: 8644},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 284, col: 8, offset: 8653},
+ expr: &choiceExpr{
+ pos: position{line: 284, col: 10, offset: 8655},
+ alternatives: []interface{}{
+ &oneOrMoreExpr{
+ pos: position{line: 284, col: 10, offset: 8655},
+ expr: &seqExpr{
+ pos: position{line: 284, col: 12, offset: 8657},
+ exprs: []interface{}{
+ ¬Expr{
+ pos: position{line: 284, col: 12, offset: 8657},
+ expr: &charClassMatcher{
+ pos: position{line: 284, col: 13, offset: 8658},
+ val: "[{}]",
+ chars: []rune{'{', '}'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 284, col: 18, offset: 8663},
+ name: "SourceChar",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 284, col: 34, offset: 8679},
+ exprs: []interface{}{
+ &litMatcher{
+ pos: position{line: 284, col: 34, offset: 8679},
+ val: "{",
+ ignoreCase: false,
+ },
+ &ruleRefExpr{
+ pos: position{line: 284, col: 38, offset: 8683},
+ name: "Code",
+ },
+ &litMatcher{
+ pos: position{line: 284, col: 43, offset: 8688},
+ val: "}",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "__",
+ pos: position{line: 286, col: 1, offset: 8696},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 286, col: 6, offset: 8703},
+ expr: &choiceExpr{
+ pos: position{line: 286, col: 8, offset: 8705},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 286, col: 8, offset: 8705},
+ name: "Whitespace",
+ },
+ &ruleRefExpr{
+ pos: position{line: 286, col: 21, offset: 8718},
+ name: "EOL",
+ },
+ &ruleRefExpr{
+ pos: position{line: 286, col: 27, offset: 8724},
+ name: "Comment",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "_",
+ pos: position{line: 287, col: 1, offset: 8735},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 287, col: 5, offset: 8741},
+ expr: &choiceExpr{
+ pos: position{line: 287, col: 7, offset: 8743},
+ alternatives: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 287, col: 7, offset: 8743},
+ name: "Whitespace",
+ },
+ &ruleRefExpr{
+ pos: position{line: 287, col: 20, offset: 8756},
+ name: "MultiLineCommentNoLineTerminator",
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "Whitespace",
+ pos: position{line: 289, col: 1, offset: 8793},
+ expr: &charClassMatcher{
+ pos: position{line: 289, col: 14, offset: 8808},
+ val: "[ \\t\\r]",
+ chars: []rune{' ', '\t', '\r'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ {
+ name: "EOL",
+ pos: position{line: 290, col: 1, offset: 8816},
+ expr: &litMatcher{
+ pos: position{line: 290, col: 7, offset: 8824},
+ val: "\n",
+ ignoreCase: false,
+ },
+ },
+ {
+ name: "EOS",
+ pos: position{line: 291, col: 1, offset: 8829},
+ expr: &choiceExpr{
+ pos: position{line: 291, col: 7, offset: 8837},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 291, col: 7, offset: 8837},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 291, col: 7, offset: 8837},
+ name: "__",
+ },
+ &litMatcher{
+ pos: position{line: 291, col: 10, offset: 8840},
+ val: ";",
+ ignoreCase: false,
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 291, col: 16, offset: 8846},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 291, col: 16, offset: 8846},
+ name: "_",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 291, col: 18, offset: 8848},
+ expr: &ruleRefExpr{
+ pos: position{line: 291, col: 18, offset: 8848},
+ name: "SingleLineComment",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 291, col: 37, offset: 8867},
+ name: "EOL",
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 291, col: 43, offset: 8873},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 291, col: 43, offset: 8873},
+ name: "__",
+ },
+ &ruleRefExpr{
+ pos: position{line: 291, col: 46, offset: 8876},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 293, col: 1, offset: 8881},
+ expr: ¬Expr{
+ pos: position{line: 293, col: 7, offset: 8889},
+ expr: &anyMatcher{
+ line: 293, col: 8, offset: 8890,
+ },
+ },
+ },
+ },
+}
+
+func (c *current) onGrammar1(initializer, rules interface{}) (interface{}, error) {
+ pos := c.astPos()
+
+ // create the grammar, assign its initializer
+ g := ast.NewGrammar(pos)
+ initSlice := toIfaceSlice(initializer)
+ if len(initSlice) > 0 {
+ g.Init = initSlice[0].(*ast.CodeBlock)
+ }
+
+ rulesSlice := toIfaceSlice(rules)
+ g.Rules = make([]*ast.Rule, len(rulesSlice))
+ for i, duo := range rulesSlice {
+ g.Rules[i] = duo.([]interface{})[0].(*ast.Rule)
+ }
+
+ return g, nil
+}
+
+func (p *parser) callonGrammar1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onGrammar1(stack["initializer"], stack["rules"])
+}
+
+func (c *current) onInitializer1(code interface{}) (interface{}, error) {
+ return code, nil
+}
+
+func (p *parser) callonInitializer1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onInitializer1(stack["code"])
+}
+
+func (c *current) onRule1(name, display, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+
+ rule := ast.NewRule(pos, name.(*ast.Identifier))
+ displaySlice := toIfaceSlice(display)
+ if len(displaySlice) > 0 {
+ rule.DisplayName = displaySlice[0].(*ast.StringLit)
+ }
+ rule.Expr = expr.(ast.Expression)
+
+ return rule, nil
+}
+
+func (p *parser) callonRule1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onRule1(stack["name"], stack["display"], stack["expr"])
+}
+
+func (c *current) onChoiceExpr1(first, rest interface{}) (interface{}, error) {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+
+ pos := c.astPos()
+ choice := ast.NewChoiceExpr(pos)
+ choice.Alternatives = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ choice.Alternatives = append(choice.Alternatives, sl.([]interface{})[3].(ast.Expression))
+ }
+ return choice, nil
+}
+
+func (p *parser) callonChoiceExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onChoiceExpr1(stack["first"], stack["rest"])
+}
+
+func (c *current) onActionExpr1(expr, code interface{}) (interface{}, error) {
+ if code == nil {
+ return expr, nil
+ }
+
+ pos := c.astPos()
+ act := ast.NewActionExpr(pos)
+ act.Expr = expr.(ast.Expression)
+ codeSlice := toIfaceSlice(code)
+ act.Code = codeSlice[1].(*ast.CodeBlock)
+
+ return act, nil
+}
+
+func (p *parser) callonActionExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onActionExpr1(stack["expr"], stack["code"])
+}
+
+func (c *current) onSeqExpr1(first, rest interface{}) (interface{}, error) {
+ restSlice := toIfaceSlice(rest)
+ if len(restSlice) == 0 {
+ return first, nil
+ }
+ seq := ast.NewSeqExpr(c.astPos())
+ seq.Exprs = []ast.Expression{first.(ast.Expression)}
+ for _, sl := range restSlice {
+ seq.Exprs = append(seq.Exprs, sl.([]interface{})[1].(ast.Expression))
+ }
+ return seq, nil
+}
+
+func (p *parser) callonSeqExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSeqExpr1(stack["first"], stack["rest"])
+}
+
+func (c *current) onLabeledExpr2(label, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+ lab := ast.NewLabeledExpr(pos)
+ lab.Label = label.(*ast.Identifier)
+ lab.Expr = expr.(ast.Expression)
+ return lab, nil
+}
+
+func (p *parser) callonLabeledExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onLabeledExpr2(stack["label"], stack["expr"])
+}
+
+func (c *current) onPrefixedExpr2(op, expr interface{}) (interface{}, error) {
+ pos := c.astPos()
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndExpr(pos)
+ and.Expr = expr.(ast.Expression)
+ return and, nil
+ }
+ not := ast.NewNotExpr(pos)
+ not.Expr = expr.(ast.Expression)
+ return not, nil
+}
+
+func (p *parser) callonPrefixedExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrefixedExpr2(stack["op"], stack["expr"])
+}
+
+func (c *current) onPrefixedOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonPrefixedOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrefixedOp1()
+}
+
+func (c *current) onSuffixedExpr2(expr, op interface{}) (interface{}, error) {
+ pos := c.astPos()
+ opStr := op.(string)
+ switch opStr {
+ case "?":
+ zero := ast.NewZeroOrOneExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "*":
+ zero := ast.NewZeroOrMoreExpr(pos)
+ zero.Expr = expr.(ast.Expression)
+ return zero, nil
+ case "+":
+ one := ast.NewOneOrMoreExpr(pos)
+ one.Expr = expr.(ast.Expression)
+ return one, nil
+ default:
+ return nil, errors.New("unknown operator: " + opStr)
+ }
+}
+
+func (p *parser) callonSuffixedExpr2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSuffixedExpr2(stack["expr"], stack["op"])
+}
+
+func (c *current) onSuffixedOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonSuffixedOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSuffixedOp1()
+}
+
+func (c *current) onPrimaryExpr7(expr interface{}) (interface{}, error) {
+ return expr, nil
+}
+
+func (p *parser) callonPrimaryExpr7() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onPrimaryExpr7(stack["expr"])
+}
+
+func (c *current) onRuleRefExpr1(name interface{}) (interface{}, error) {
+ ref := ast.NewRuleRefExpr(c.astPos())
+ ref.Name = name.(*ast.Identifier)
+ return ref, nil
+}
+
+func (p *parser) callonRuleRefExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onRuleRefExpr1(stack["name"])
+}
+
+func (c *current) onSemanticPredExpr1(op, code interface{}) (interface{}, error) {
+ opStr := op.(string)
+ if opStr == "&" {
+ and := ast.NewAndCodeExpr(c.astPos())
+ and.Code = code.(*ast.CodeBlock)
+ return and, nil
+ }
+ not := ast.NewNotCodeExpr(c.astPos())
+ not.Code = code.(*ast.CodeBlock)
+ return not, nil
+}
+
+func (p *parser) callonSemanticPredExpr1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSemanticPredExpr1(stack["op"], stack["code"])
+}
+
+func (c *current) onSemanticPredOp1() (interface{}, error) {
+ return string(c.text), nil
+}
+
+func (p *parser) callonSemanticPredOp1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSemanticPredOp1()
+}
+
+func (c *current) onIdentifier1(ident interface{}) (interface{}, error) {
+ astIdent := ast.NewIdentifier(c.astPos(), string(c.text))
+ if reservedWords[astIdent.Val] {
+ return astIdent, errors.New("identifier is a reserved word")
+ }
+ return astIdent, nil
+}
+
+func (p *parser) callonIdentifier1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onIdentifier1(stack["ident"])
+}
+
+func (c *current) onIdentifierName1() (interface{}, error) {
+ return ast.NewIdentifier(c.astPos(), string(c.text)), nil
+}
+
+func (p *parser) callonIdentifierName1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onIdentifierName1()
+}
+
+func (c *current) onLitMatcher1(lit, ignore interface{}) (interface{}, error) {
+ rawStr := lit.(*ast.StringLit).Val
+ s, err := strconv.Unquote(rawStr)
+ if err != nil {
+ // an invalid string literal raises an error in the escape rules,
+ // so simply replace the literal with an empty string here to
+ // avoid a cascade of errors.
+ s = ""
+ }
+ m := ast.NewLitMatcher(c.astPos(), s)
+ m.IgnoreCase = ignore != nil
+ return m, nil
+}
+
+func (p *parser) callonLitMatcher1() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onLitMatcher1(stack["lit"], stack["ignore"])
+}
+
+func (c *current) onStringLiteral2() (interface{}, error) {
+ return ast.NewStringLit(c.astPos(), string(c.text)), nil
+}
+
+func (p *parser) callonStringLiteral2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onStringLiteral2()
+}
+
+func (c *current) onStringLiteral18() (interface{}, error) {
+ return ast.NewStringLit(c.astPos(), "``"), errors.New("string literal not terminated")
+}
+
+func (p *parser) callonStringLiteral18() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onStringLiteral18()
+}
+
+func (c *current) onDoubleStringEscape5() (interface{}, error) {
+ return nil, errors.New("invalid escape character")
+}
+
+func (p *parser) callonDoubleStringEscape5() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onDoubleStringEscape5()
+}
+
+func (c *current) onSingleStringEscape5() (interface{}, error) {
+ return nil, errors.New("invalid escape character")
+}
+
+func (p *parser) callonSingleStringEscape5() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onSingleStringEscape5()
+}
+
+func (c *current) onOctalEscape6() (interface{}, error) {
+ return nil, errors.New("invalid octal escape")
+}
+
+func (p *parser) callonOctalEscape6() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onOctalEscape6()
+}
+
+func (c *current) onHexEscape6() (interface{}, error) {
+ return nil, errors.New("invalid hexadecimal escape")
+}
+
+func (p *parser) callonHexEscape6() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onHexEscape6()
+}
+
+func (c *current) onLongUnicodeEscape2() (interface{}, error) {
+ return validateUnicodeEscape(string(c.text), "invalid Unicode escape")
+
+}
+
+func (p *parser) callonLongUnicodeEscape2() (interface{}, error) {
+ stack := p.vstack[len(p.vstack)-1]
+ _ = stack
+ return p.cur.onLongUnicodeEscape2()
+}
+
+// The on*/callon* pairs below appear to be pigeon-generated action
+// handlers: each on* method holds the Go code block attached to a grammar
+// rule, and the matching callon* wrapper exposes the top of the parser's
+// variable stack (rule labels) before invoking it.
+func (c *current) onLongUnicodeEscape13() (interface{}, error) {
+	return nil, errors.New("invalid Unicode escape")
+}
+
+func (p *parser) callonLongUnicodeEscape13() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onLongUnicodeEscape13()
+}
+
+func (c *current) onShortUnicodeEscape2() (interface{}, error) {
+	return validateUnicodeEscape(string(c.text), "invalid Unicode escape")
+
+}
+
+func (p *parser) callonShortUnicodeEscape2() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onShortUnicodeEscape2()
+}
+
+func (c *current) onShortUnicodeEscape9() (interface{}, error) {
+	return nil, errors.New("invalid Unicode escape")
+}
+
+func (p *parser) callonShortUnicodeEscape9() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onShortUnicodeEscape9()
+}
+
+func (c *current) onCharClassMatcher2() (interface{}, error) {
+	pos := c.astPos()
+	cc := ast.NewCharClassMatcher(pos, string(c.text))
+	return cc, nil
+}
+
+func (p *parser) callonCharClassMatcher2() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onCharClassMatcher2()
+}
+
+// Error recovery: returns a placeholder empty char class along with the
+// error so parsing can continue after an unterminated character class.
+func (c *current) onCharClassMatcher15() (interface{}, error) {
+	return ast.NewCharClassMatcher(c.astPos(), "[]"), errors.New("character class not terminated")
+}
+
+func (p *parser) callonCharClassMatcher15() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onCharClassMatcher15()
+}
+
+func (c *current) onCharClassEscape5() (interface{}, error) {
+	return nil, errors.New("invalid escape character")
+}
+
+func (p *parser) callonCharClassEscape5() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onCharClassEscape5()
+}
+
+func (c *current) onUnicodeClassEscape5() (interface{}, error) {
+	return nil, errors.New("invalid Unicode class escape")
+}
+
+func (p *parser) callonUnicodeClassEscape5() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onUnicodeClassEscape5()
+}
+
+// Validates a \p{Name} escape against the table of known Unicode class
+// names; the match value itself is unused (nil).
+func (c *current) onUnicodeClassEscape13(ident interface{}) (interface{}, error) {
+	if !unicodeClasses[ident.(*ast.Identifier).Val] {
+		return nil, errors.New("invalid Unicode class escape")
+	}
+	return nil, nil
+
+}
+
+func (p *parser) callonUnicodeClassEscape13() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onUnicodeClassEscape13(stack["ident"])
+}
+
+func (c *current) onUnicodeClassEscape19() (interface{}, error) {
+	return nil, errors.New("Unicode class not terminated")
+
+}
+
+func (p *parser) callonUnicodeClassEscape19() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onUnicodeClassEscape19()
+}
+
+func (c *current) onAnyMatcher1() (interface{}, error) {
+	any := ast.NewAnyMatcher(c.astPos(), ".")
+	return any, nil
+}
+
+func (p *parser) callonAnyMatcher1() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onAnyMatcher1()
+}
+
+func (c *current) onCodeBlock2() (interface{}, error) {
+	pos := c.astPos()
+	cb := ast.NewCodeBlock(pos, string(c.text))
+	return cb, nil
+}
+
+func (p *parser) callonCodeBlock2() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onCodeBlock2()
+}
+
+func (c *current) onCodeBlock7() (interface{}, error) {
+	return nil, errors.New("code block not terminated")
+}
+
+func (p *parser) callonCodeBlock7() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onCodeBlock7()
+}
+
+// Sentinel errors reported by the parser runtime.
+var (
+	// errNoRule is returned when the grammar to parse has no rule.
+	errNoRule = errors.New("grammar has no rule")
+
+	// errInvalidEncoding is returned when the source is not properly
+	// utf8-encoded.
+	errInvalidEncoding = errors.New("invalid encoding")
+
+	// errNoMatch is returned if no match could be found.
+	errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+	return func(p *parser) Option {
+		old := p.debug
+		p.debug = b
+		return Debug(old)
+	}
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+	return func(p *parser) Option {
+		old := p.memoize
+		p.memoize = b
+		return Memoize(old)
+	}
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+	return func(p *parser) Option {
+		old := p.recover
+		p.recover = b
+		return Recover(old)
+	}
+}
+
+// ParseFile parses the file identified by filename.
+// On failure the returned error is an errList aggregating every parse
+// error encountered (see parse).
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+	return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+	line, col, offset int
+}
+
+func (p position) String() string {
+	return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+	position
+	rn rune // current rune at this position
+	w  int  // width in bytes of the current rune
+}
+
+// current captures the state exposed to action code (the c receiver).
+type current struct {
+	pos  position // start position of the match
+	text []byte   // raw text of the match
+}
+
+// the AST types...
+
+// grammar is the root node: an ordered list of rules; rule [0] is the
+// start rule.
+type grammar struct {
+	pos   position
+	rules []*rule
+}
+
+type rule struct {
+	pos         position
+	name        string
+	displayName string
+	expr        interface{}
+}
+
+// choiceExpr tries each alternative in order; first match wins.
+type choiceExpr struct {
+	pos          position
+	alternatives []interface{}
+}
+
+// actionExpr runs the associated Go code block when expr matches.
+type actionExpr struct {
+	pos  position
+	expr interface{}
+	run  func(*parser) (interface{}, error)
+}
+
+type seqExpr struct {
+	pos   position
+	exprs []interface{}
+}
+
+// labeledExpr binds the matched value to label in the variable stack.
+type labeledExpr struct {
+	pos   position
+	label string
+	expr  interface{}
+}
+
+// expr is the common shape shared by the unary-operator node types below.
+type expr struct {
+	pos  position
+	expr interface{}
+}
+
+type andExpr expr        // &e predicate
+type notExpr expr        // !e predicate
+type zeroOrOneExpr expr  // e?
+type zeroOrMoreExpr expr // e*
+type oneOrMoreExpr expr  // e+
+
+type ruleRefExpr struct {
+	pos  position
+	name string
+}
+
+type andCodeExpr struct {
+	pos position
+	run func(*parser) (bool, error)
+}
+
+type notCodeExpr struct {
+	pos position
+	run func(*parser) (bool, error)
+}
+
+type litMatcher struct {
+	pos        position
+	val        string
+	ignoreCase bool
+}
+
+// charClassMatcher matches [..] character classes: single chars, ranges
+// (pairs in ranges), and Unicode class tables.
+type charClassMatcher struct {
+	pos        position
+	val        string
+	chars      []rune
+	ranges     []rune
+	classes    []*unicode.RangeTable
+	ignoreCase bool
+	inverted   bool
+}
+
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+func (e *errList) add(err error) {
+	*e = append(*e, err)
+}
+
+// err returns the list as a single error value, or nil when empty.
+// Duplicate messages are removed first.
+func (e errList) err() error {
+	if len(e) == 0 {
+		return nil
+	}
+	e.dedupe()
+	return e
+}
+
+// dedupe removes errors with identical messages, keeping first occurrences.
+func (e *errList) dedupe() {
+	var cleaned []error
+	set := make(map[string]bool)
+	for _, err := range *e {
+		if msg := err.Error(); !set[msg] {
+			set[msg] = true
+			cleaned = append(cleaned, err)
+		}
+	}
+	*e = cleaned
+}
+
+// Error implements the error interface; multiple errors are joined with
+// newlines.
+func (e errList) Error() string {
+	switch len(e) {
+	case 0:
+		return ""
+	case 1:
+		return e[0].Error()
+	default:
+		var buf bytes.Buffer
+
+		for i, err := range e {
+			if i > 0 {
+				buf.WriteRune('\n')
+			}
+			buf.WriteString(err.Error())
+		}
+		return buf.String()
+	}
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+	Inner  error
+	pos    position
+	prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+	return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+// Recovery from panics is enabled by default (see Recover).
+func newParser(filename string, b []byte, opts ...Option) *parser {
+	p := &parser{
+		filename: filename,
+		errs:     new(errList),
+		data:     b,
+		pt:       savepoint{position: position{line: 1}},
+		recover:  true,
+	}
+	p.setOptions(opts)
+	return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+	for _, opt := range opts {
+		opt(p)
+	}
+}
+
+// resultTuple is a memoized parse result: the value, whether it matched,
+// and the parser position after the attempt.
+type resultTuple struct {
+	v   interface{}
+	b   bool
+	end savepoint
+}
+
+type parser struct {
+	filename string
+	pt       savepoint // current read position
+	cur      current   // state exposed to action code
+
+	data []byte
+	errs *errList
+
+	recover bool
+	debug   bool
+	depth   int // indentation depth for debug output
+
+	memoize bool
+	// memoization table for the packrat algorithm:
+	// map[offset in source] map[expression or rule] {value, match}
+	memo map[int]map[interface{}]resultTuple
+
+	// rules table, maps the rule identifier to the rule node
+	rules map[string]*rule
+	// variables stack, map of label to value
+	vstack []map[string]interface{}
+	// rule stack, allows identification of the current rule in errors
+	rstack []*rule
+
+	// stats
+	exprCnt int
+}
+
+// push a variable set on the vstack.
+// Reuses previously-allocated slots/maps where possible to limit garbage.
+func (p *parser) pushV() {
+	if cap(p.vstack) == len(p.vstack) {
+		// create new empty slot in the stack
+		p.vstack = append(p.vstack, nil)
+	} else {
+		// slice to 1 more
+		p.vstack = p.vstack[:len(p.vstack)+1]
+	}
+
+	// get the last args set
+	m := p.vstack[len(p.vstack)-1]
+	if m != nil && len(m) == 0 {
+		// empty map, all good
+		return
+	}
+
+	m = make(map[string]interface{})
+	p.vstack[len(p.vstack)-1] = m
+}
+
+// pop a variable set from the vstack.
+func (p *parser) popV() {
+	// if the map is not empty, clear it
+	m := p.vstack[len(p.vstack)-1]
+	if len(m) > 0 {
+		// GC that map
+		p.vstack[len(p.vstack)-1] = nil
+	}
+	p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+// print emits a debug trace line (no-op unless p.debug) and returns s
+// unchanged so it can be chained through in/out.
+func (p *parser) print(prefix, s string) string {
+	if !p.debug {
+		return s
+	}
+
+	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
+		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+	return s
+}
+
+// in traces entry into a parse step, increasing the indent depth.
+func (p *parser) in(s string) string {
+	p.depth++
+	return p.print(strings.Repeat(" ", p.depth)+">", s)
+}
+
+// out traces exit from a parse step, decreasing the indent depth.
+func (p *parser) out(s string) string {
+	p.depth--
+	return p.print(strings.Repeat(" ", p.depth)+"<", s)
+}
+
+func (p *parser) addErr(err error) {
+	p.addErrAt(err, p.pt.position)
+}
+
+// addErrAt records err wrapped with a "file:line:col (offset): rule" prefix.
+func (p *parser) addErrAt(err error, pos position) {
+	var buf bytes.Buffer
+	if p.filename != "" {
+		buf.WriteString(p.filename)
+	}
+	if buf.Len() > 0 {
+		buf.WriteString(":")
+	}
+	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
+	if len(p.rstack) > 0 {
+		if buf.Len() > 0 {
+			buf.WriteString(": ")
+		}
+		rule := p.rstack[len(p.rstack)-1]
+		if rule.displayName != "" {
+			buf.WriteString("rule " + rule.displayName)
+		} else {
+			buf.WriteString("rule " + rule.name)
+		}
+	}
+	pe := &parserError{Inner: err, prefix: buf.String()}
+	p.errs.add(pe)
+}
+
+// read advances the parser to the next rune.
+// At EOF, DecodeRune yields (RuneError, 0), so rn == RuneError with w == 0
+// marks end of input; RuneError with n > 0 is a real encoding error.
+func (p *parser) read() {
+	p.pt.offset += p.pt.w
+	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+	p.pt.rn = rn
+	p.pt.w = n
+	p.pt.col++
+	if rn == '\n' {
+		p.pt.line++
+		p.pt.col = 0
+	}
+
+	if rn == utf8.RuneError {
+		if n > 0 {
+			p.addErr(errInvalidEncoding)
+		}
+	}
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+	if p.debug {
+		defer p.out(p.in("restore"))
+	}
+	if pt.offset == p.pt.offset {
+		return
+	}
+	p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+	return p.data[start.position.offset:p.pt.position.offset]
+}
+
+// getMemoized looks up a cached result for node at the current offset.
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+	if len(p.memo) == 0 {
+		return resultTuple{}, false
+	}
+	m := p.memo[p.pt.offset]
+	if len(m) == 0 {
+		return resultTuple{}, false
+	}
+	res, ok := m[node]
+	return res, ok
+}
+
+// setMemoized caches the result of parsing node starting at pt.
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+	if p.memo == nil {
+		p.memo = make(map[int]map[interface{}]resultTuple)
+	}
+	m := p.memo[pt.offset]
+	if m == nil {
+		m = make(map[interface{}]resultTuple)
+		p.memo[pt.offset] = m
+	}
+	m[node] = tuple
+}
+
+// buildRulesTable indexes the grammar's rules by name for ruleRefExpr lookup.
+func (p *parser) buildRulesTable(g *grammar) {
+	p.rules = make(map[string]*rule, len(g.rules))
+	for _, r := range g.rules {
+		p.rules[r.name] = r
+	}
+}
+
+// parse runs the grammar against the parser's input, starting at rule [0].
+// It returns the start rule's value and any accumulated errors (an errList).
+// Note that a value may be returned together with a non-nil error.
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+	if len(g.rules) == 0 {
+		p.addErr(errNoRule)
+		return nil, p.errs.err()
+	}
+
+	// TODO : not super critical but this could be generated
+	p.buildRulesTable(g)
+
+	if p.recover {
+		// panic can be used in action code to stop parsing immediately
+		// and return the panic as an error.
+		defer func() {
+			if e := recover(); e != nil {
+				if p.debug {
+					defer p.out(p.in("panic handler"))
+				}
+				val = nil
+				switch e := e.(type) {
+				case error:
+					p.addErr(e)
+				default:
+					p.addErr(fmt.Errorf("%v", e))
+				}
+				err = p.errs.err()
+			}
+		}()
+	}
+
+	// start rule is rule [0]
+	p.read() // advance to first rune
+	val, ok := p.parseRule(g.rules[0])
+	if !ok {
+		if len(*p.errs) == 0 {
+			// make sure this doesn't go out silently
+			p.addErr(errNoMatch)
+		}
+		return nil, p.errs.err()
+	}
+	return val, p.errs.err()
+}
+
+// parseRule parses a single rule, consulting/updating the memoization
+// table when memoize is enabled and maintaining the rule stack used for
+// error messages.
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseRule " + rule.name))
+	}
+
+	if p.memoize {
+		res, ok := p.getMemoized(rule)
+		if ok {
+			p.restore(res.end)
+			return res.v, res.b
+		}
+	}
+
+	start := p.pt
+	p.rstack = append(p.rstack, rule)
+	p.pushV()
+	val, ok := p.parseExpr(rule.expr)
+	p.popV()
+	p.rstack = p.rstack[:len(p.rstack)-1]
+	if ok && p.debug {
+		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+	}
+
+	if p.memoize {
+		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+	}
+	return val, ok
+}
+
+// parseExpr dispatches on the expression node type and delegates to the
+// matching parse method. Unknown node types are a programmer error and
+// panic (recovered by parse when the recover option is on).
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+	var pt savepoint
+	var ok bool
+
+	if p.memoize {
+		res, ok := p.getMemoized(expr)
+		if ok {
+			p.restore(res.end)
+			return res.v, res.b
+		}
+		pt = p.pt
+	}
+
+	p.exprCnt++
+	var val interface{}
+	switch expr := expr.(type) {
+	case *actionExpr:
+		val, ok = p.parseActionExpr(expr)
+	case *andCodeExpr:
+		val, ok = p.parseAndCodeExpr(expr)
+	case *andExpr:
+		val, ok = p.parseAndExpr(expr)
+	case *anyMatcher:
+		val, ok = p.parseAnyMatcher(expr)
+	case *charClassMatcher:
+		val, ok = p.parseCharClassMatcher(expr)
+	case *choiceExpr:
+		val, ok = p.parseChoiceExpr(expr)
+	case *labeledExpr:
+		val, ok = p.parseLabeledExpr(expr)
+	case *litMatcher:
+		val, ok = p.parseLitMatcher(expr)
+	case *notCodeExpr:
+		val, ok = p.parseNotCodeExpr(expr)
+	case *notExpr:
+		val, ok = p.parseNotExpr(expr)
+	case *oneOrMoreExpr:
+		val, ok = p.parseOneOrMoreExpr(expr)
+	case *ruleRefExpr:
+		val, ok = p.parseRuleRefExpr(expr)
+	case *seqExpr:
+		val, ok = p.parseSeqExpr(expr)
+	case *zeroOrMoreExpr:
+		val, ok = p.parseZeroOrMoreExpr(expr)
+	case *zeroOrOneExpr:
+		val, ok = p.parseZeroOrOneExpr(expr)
+	default:
+		panic(fmt.Sprintf("unknown expression type %T", expr))
+	}
+	if p.memoize {
+		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+	}
+	return val, ok
+}
+
+// parseActionExpr parses the inner expression and, on match, runs the
+// associated action code with c.pos/c.text set to the matched span.
+// An error from the action is recorded but the match still succeeds,
+// with the action's return value (possibly nil) as the result.
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseActionExpr"))
+	}
+
+	start := p.pt
+	val, ok := p.parseExpr(act.expr)
+	if ok {
+		p.cur.pos = start.position
+		p.cur.text = p.sliceFrom(start)
+		actVal, err := act.run(p)
+		if err != nil {
+			p.addErrAt(err, start.position)
+		}
+		val = actVal
+	}
+	if ok && p.debug {
+		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+	}
+	return val, ok
+}
+
+// parseAndCodeExpr evaluates a &{...} code predicate; it consumes no input.
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAndCodeExpr"))
+	}
+
+	ok, err := and.run(p)
+	if err != nil {
+		p.addErr(err)
+	}
+	return nil, ok
+}
+
+// parseAndExpr evaluates a &e predicate: the inner expression is tried
+// and the position restored, so no input is consumed either way.
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAndExpr"))
+	}
+
+	pt := p.pt
+	p.pushV()
+	_, ok := p.parseExpr(and.expr)
+	p.popV()
+	p.restore(pt)
+	return nil, ok
+}
+
+// parseAnyMatcher matches any single rune. RuneError signals EOF or an
+// invalid byte (see read), so the any-matcher fails there.
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAnyMatcher"))
+	}
+
+	if p.pt.rn != utf8.RuneError {
+		start := p.pt
+		p.read()
+		return p.sliceFrom(start), true
+	}
+	return nil, false
+}
+
+// parseCharClassMatcher matches the current rune against the class's
+// single chars, then its ranges (inclusive pairs), then its Unicode
+// tables. The inverted flag flips the outcome of each check.
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseCharClassMatcher"))
+	}
+
+	cur := p.pt.rn
+	// can't match EOF
+	if cur == utf8.RuneError {
+		return nil, false
+	}
+	start := p.pt
+	if chr.ignoreCase {
+		cur = unicode.ToLower(cur)
+	}
+
+	// try to match in the list of available chars
+	for _, rn := range chr.chars {
+		if rn == cur {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of ranges
+	for i := 0; i < len(chr.ranges); i += 2 {
+		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of Unicode classes
+	for _, cl := range chr.classes {
+		if unicode.Is(cl, cur) {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	if chr.inverted {
+		p.read()
+		return p.sliceFrom(start), true
+	}
+	return nil, false
+}
+
+// parseChoiceExpr tries each alternative in order and returns the first
+// match (ordered choice; there is no backtracking past a successful
+// alternative).
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseChoiceExpr"))
+	}
+
+	for _, alt := range ch.alternatives {
+		p.pushV()
+		val, ok := p.parseExpr(alt)
+		p.popV()
+		if ok {
+			return val, ok
+		}
+	}
+	return nil, false
+}
+
+// parseLabeledExpr parses the inner expression and, on match, binds the
+// value to the label in the enclosing variable set.
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseLabeledExpr"))
+	}
+
+	p.pushV()
+	val, ok := p.parseExpr(lab.expr)
+	p.popV()
+	if ok && lab.label != "" {
+		m := p.vstack[len(p.vstack)-1]
+		m[lab.label] = val
+	}
+	return val, ok
+}
+
+// parseLitMatcher matches the literal rune by rune, restoring the start
+// position on failure. Case-insensitive literals are compared lowercase
+// (the generated literal value is already lowercase).
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseLitMatcher"))
+	}
+
+	start := p.pt
+	for _, want := range lit.val {
+		cur := p.pt.rn
+		if lit.ignoreCase {
+			cur = unicode.ToLower(cur)
+		}
+		if cur != want {
+			p.restore(start)
+			return nil, false
+		}
+		p.read()
+	}
+	return p.sliceFrom(start), true
+}
+
+// parseNotCodeExpr evaluates a !{...} code predicate; the result is the
+// negation of the code's boolean. No input is consumed.
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseNotCodeExpr"))
+	}
+
+	ok, err := not.run(p)
+	if err != nil {
+		p.addErr(err)
+	}
+	return nil, !ok
+}
+
+// parseNotExpr evaluates a !e predicate: the inner expression is tried,
+// the position restored, and the match result negated.
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseNotExpr"))
+	}
+
+	pt := p.pt
+	p.pushV()
+	_, ok := p.parseExpr(not.expr)
+	p.popV()
+	p.restore(pt)
+	return nil, !ok
+}
+
+// parseOneOrMoreExpr matches e+ : the inner expression repeatedly, failing
+// only if it never matches. Note that an inner expression that matches
+// the empty string makes this loop forever (pathological grammar).
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseOneOrMoreExpr"))
+	}
+
+	var vals []interface{}
+
+	for {
+		p.pushV()
+		val, ok := p.parseExpr(expr.expr)
+		p.popV()
+		if !ok {
+			if len(vals) == 0 {
+				// did not match once, no match
+				return nil, false
+			}
+			return vals, true
+		}
+		vals = append(vals, val)
+	}
+}
+
+// parseRuleRefExpr resolves a rule reference by name and parses that rule.
+// An undefined rule is recorded as an error and fails the match.
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseRuleRefExpr " + ref.name))
+	}
+
+	if ref.name == "" {
+		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+	}
+
+	rule := p.rules[ref.name]
+	if rule == nil {
+		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+		return nil, false
+	}
+	return p.parseRule(rule)
+}
+
+// parseSeqExpr matches every expression in order; a failure restores the
+// starting position (all-or-nothing).
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseSeqExpr"))
+	}
+
+	var vals []interface{}
+
+	pt := p.pt
+	for _, expr := range seq.exprs {
+		val, ok := p.parseExpr(expr)
+		if !ok {
+			p.restore(pt)
+			return nil, false
+		}
+		vals = append(vals, val)
+	}
+	return vals, true
+}
+
+// parseZeroOrMoreExpr matches e* : zero or more repetitions; always
+// succeeds. Same empty-match caveat as parseOneOrMoreExpr.
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseZeroOrMoreExpr"))
+	}
+
+	var vals []interface{}
+
+	for {
+		p.pushV()
+		val, ok := p.parseExpr(expr.expr)
+		p.popV()
+		if !ok {
+			return vals, true
+		}
+		vals = append(vals, val)
+	}
+}
+
+// parseZeroOrOneExpr matches e? : optional, so it always succeeds; the
+// value is nil when the inner expression did not match.
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseZeroOrOneExpr"))
+	}
+
+	p.pushV()
+	val, _ := p.parseExpr(expr.expr)
+	p.popV()
+	// whether it matched or not, consider it a match
+	return val, true
+}
+
+// rangeTable resolves a Unicode class name to its RangeTable, checking
+// general categories, then properties, then scripts. The grammar
+// validates class names beforehand, so a miss is a programmer error.
+func rangeTable(class string) *unicode.RangeTable {
+	if rt, ok := unicode.Categories[class]; ok {
+		return rt
+	}
+	if rt, ok := unicode.Properties[class]; ok {
+		return rt
+	}
+	if rt, ok := unicode.Scripts[class]; ok {
+		return rt
+	}
+
+	// cannot happen
+	panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/reserved_words.go b/vendor/github.com/PuerkitoBio/pigeon/reserved_words.go
new file mode 100644
index 0000000000..127d27387d
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/reserved_words.go
@@ -0,0 +1,71 @@
+package main
+
+// reservedWords lists identifiers that cannot be used as generated Go
+// names: language keywords plus predeclared identifiers.
+var reservedWords = map[string]bool{
+	// Go keywords http://golang.org/ref/spec#Keywords
+	"break":       true,
+	"case":        true,
+	"chan":        true,
+	"const":       true,
+	"continue":    true,
+	"default":     true,
+	"defer":       true,
+	"else":        true,
+	"fallthrough": true,
+	"for":         true,
+	"func":        true,
+	"goto":        true,
+	"go":          true,
+	"if":          true,
+	"import":      true,
+	"interface":   true,
+	"map":         true,
+	"package":     true,
+	"range":       true,
+	"return":      true,
+	"select":      true,
+	"struct":      true,
+	"switch":      true,
+	"type":        true,
+	"var":         true,
+
+	// predeclared identifiers http://golang.org/ref/spec#Predeclared_identifiers
+	"bool":       true,
+	"byte":       true,
+	"complex64":  true,
+	"complex128": true,
+	"error":      true,
+	"float32":    true,
+	"float64":    true,
+	"int8":       true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"int":        true,
+	"rune":       true,
+	"string":     true,
+	"uint8":      true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uintptr":    true,
+	"uint":       true,
+	"true":       true,
+	"false":      true,
+	"iota":       true,
+	"nil":        true,
+	"append":     true,
+	"cap":        true,
+	"close":      true,
+	"complex":    true,
+	"copy":       true,
+	"delete":     true,
+	"imag":       true,
+	"len":        true,
+	"make":       true,
+	"new":        true,
+	"panic":      true,
+	"println":    true,
+	"print":      true,
+	"real":       true,
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/targeted_test.go b/vendor/github.com/PuerkitoBio/pigeon/targeted_test.go
new file mode 100644
index 0000000000..24b9b633d9
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/targeted_test.go
@@ -0,0 +1,855 @@
+package main
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "testing"
+ "unicode"
+)
+
+// TestParseNoRule verifies that parsing an empty grammar yields a single
+// errNoRule error wrapped in an errList of parserError values.
+func TestParseNoRule(t *testing.T) {
+	g := &grammar{}
+	p := newParser("", []byte(""))
+	_, err := p.parse(g)
+	if err == nil {
+		t.Fatal("want error, got nil")
+	}
+	el, ok := err.(errList)
+	if !ok {
+		t.Fatalf("want error type %T, got %T", errList{}, err)
+	}
+	if len(el) != 1 {
+		t.Fatalf("want 1 error, got %d", len(el))
+	}
+	pe, ok := el[0].(*parserError)
+	if !ok {
+		t.Fatalf("want single error type %T, got %T", &parserError{}, el[0])
+	}
+	if pe.Inner != errNoRule {
+		t.Fatalf("want error %v, got %v", errNoRule, el[0])
+	}
+}
+
+// TestParseAnyMatcher exercises the any matcher ('.') directly: a nil
+// expected output means no match (EOF or invalid encoding input).
+func TestParseAnyMatcher(t *testing.T) {
+	cases := []struct {
+		in  string
+		out []byte
+	}{
+		{"", nil},
+		{"a", []byte("a")},
+		{"\u2190", []byte("\u2190")},
+		{"ab", []byte("a")},
+		{"\u2190\U00001100", []byte("\u2190")},
+		{"\x0d", []byte("\x0d")},
+		{"\xfa", nil}, // invalid UTF-8 byte: no match
+		{"\nab", []byte("\n")},
+	}
+
+	for _, tc := range cases {
+		p := newParser("", []byte(tc.in))
+
+		// advance to the first rune
+		p.read()
+
+		var want interface{}
+		var match bool
+		if tc.out != nil {
+			want = tc.out
+			match = true
+		}
+		got, ok := p.parseAnyMatcher(&anyMatcher{})
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: want %v, got %v", tc.in, tc.out, got)
+		}
+		if ok != match {
+			t.Errorf("%q: want match? %t, got %t", tc.in, match, ok)
+		}
+		if p.pt.offset != len(tc.out) {
+			t.Errorf("%q: want offset %d, got %d", tc.in, len(tc.out), p.pt.offset)
+		}
+	}
+}
+
+// TestParseLitMatcher exercises literal matching, including empty
+// literals, ignore-case (ic) variants, multi-rune literals, and the
+// consumed-offset bookkeeping after a match or failed attempt.
+func TestParseLitMatcher(t *testing.T) {
+	cases := []struct {
+		in  string
+		lit string
+		ic  bool
+		out []byte
+	}{
+		{"", "", false, []byte{}}, // empty literal always matches
+		{"", "", true, []byte{}},  // empty literal always matches
+		{"a", "", false, []byte{}},
+		{"a", "", true, []byte{}},
+		{"a", "a", false, []byte("a")},
+		{"a", "a", true, []byte("a")},
+		{"a", "A", false, nil},
+		{"a", "a", true, []byte("a")}, // ignored case literal is always generated lowercase
+		{"A", "a", true, []byte("A")},
+		{"b", "a", false, nil},
+		{"b", "a", true, nil},
+		{"abc", "ab", false, []byte("ab")},
+		{"abc", "ab", true, []byte("ab")},
+		{"ab", "abc", false, nil},
+		{"ab", "abc", true, nil},
+		{"\u2190a", "\u2190", false, []byte("\u2190")},
+		{"\u2190a", "\u2190", true, []byte("\u2190")},
+		{"\n", "\n", false, []byte("\n")},
+		{"\n", "\n", true, []byte("\n")},
+		{"\na", "\n", false, []byte("\n")},
+		{"\na", "\n", true, []byte("\n")},
+	}
+
+	for _, tc := range cases {
+		p := newParser("", []byte(tc.in))
+
+		// advance to the first rune
+		p.read()
+
+		var want interface{}
+		var match bool
+		if tc.out != nil {
+			match = true
+			want = tc.out
+		}
+		lbl := fmt.Sprintf("%q (%t): %q", tc.lit, tc.ic, tc.in)
+
+		got, ok := p.parseLitMatcher(&litMatcher{val: tc.lit, ignoreCase: tc.ic})
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%s: want %v, got %v", lbl, tc.out, got)
+		}
+		if ok != match {
+			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
+		}
+		if p.pt.offset != len(tc.out) {
+			t.Errorf("%s: want offset %d, got %d", lbl, len(tc.out), p.pt.offset)
+		}
+	}
+}
+
+// TestParseCharClassMatcher exercises character classes across single
+// chars, ranges, and Unicode class tables, in all combinations of
+// ignore-case (ic) and inverted (iv). A nil out means no match.
+func TestParseCharClassMatcher(t *testing.T) {
+	cases := []struct {
+		in      string
+		val     string
+		chars   []rune
+		ranges  []rune
+		classes []string
+		ic      bool
+		iv      bool
+		out     []byte
+	}{
+		{in: "", val: "[]", out: nil},                    // empty char class means no char matches
+		{in: "", val: "[^]", iv: true, out: nil},         // can't match EOF
+		{in: "", val: "[]i", ic: true, out: nil},
+		{in: "", val: "[^]i", ic: true, iv: true, out: nil}, // can't match EOF
+		{in: "a", val: "[]", out: nil},
+		{in: "a", val: "[^]", iv: true, out: []byte("a")},
+		{in: "a", val: "[]i", ic: true, out: nil},
+		{in: "a", val: "[^]i", ic: true, iv: true, out: []byte("a")},
+
+		{in: "a", val: "[a]", chars: []rune{'a'}, out: []byte("a")},
+		{in: "a", val: "[a]i", ic: true, chars: []rune{'a'}, out: []byte("a")},
+		{in: "A", val: "[a]i", ic: true, chars: []rune{'a'}, out: []byte("A")},
+		{in: "a", val: "[^a]", chars: []rune{'a'}, iv: true, out: nil},
+		{in: "A", val: "[^a]i", iv: true, ic: true, chars: []rune{'a'}, out: nil},
+
+		{in: "b", val: "[a]", chars: []rune{'a'}, out: nil},
+		{in: "b", val: "[a]i", ic: true, chars: []rune{'a'}, out: nil},
+		{in: "B", val: "[a]i", ic: true, chars: []rune{'a'}, out: nil},
+		{in: "b", val: "[^a]", chars: []rune{'a'}, iv: true, out: []byte("b")},
+		{in: "b", val: "[^a]i", iv: true, ic: true, chars: []rune{'a'}, out: []byte("b")},
+		{in: "B", val: "[^a]i", iv: true, ic: true, chars: []rune{'a'}, out: []byte("B")},
+
+		{in: "←", val: "[a]", chars: []rune{'a'}, out: nil},
+		{in: "←", val: "[a]i", ic: true, chars: []rune{'a'}, out: nil},
+		{in: "←", val: "[a]i", ic: true, chars: []rune{'a'}, out: nil},
+		{in: "←", val: "[^a]", chars: []rune{'a'}, iv: true, out: []byte("←")},
+		{in: "←", val: "[^a]i", iv: true, ic: true, chars: []rune{'a'}, out: []byte("←")},
+		{in: "←", val: "[^a]i", iv: true, ic: true, chars: []rune{'a'}, out: []byte("←")},
+
+		{in: "b", val: "[a-c]", ranges: []rune{'a', 'c'}, out: []byte("b")},
+		{in: "B", val: "[a-c]", ranges: []rune{'a', 'c'}, out: nil},
+		{in: "b", val: "[a-c]i", ic: true, ranges: []rune{'a', 'c'}, out: []byte("b")},
+		{in: "B", val: "[a-c]i", ic: true, ranges: []rune{'a', 'c'}, out: []byte("B")},
+		{in: "b", val: "[^a-c]", ranges: []rune{'a', 'c'}, iv: true, out: nil},
+		{in: "B", val: "[^a-c]", ranges: []rune{'a', 'c'}, iv: true, out: []byte("B")},
+		{in: "b", val: "[^a-c]i", iv: true, ic: true, ranges: []rune{'a', 'c'}, out: nil},
+		{in: "B", val: "[^a-c]i", iv: true, ic: true, ranges: []rune{'a', 'c'}, out: nil},
+		{in: "z", val: "[^a-c]i", iv: true, ic: true, chars: []rune{'a', 'c'}, out: []byte("z")},
+
+		{in: "∝", val: "[a-c]", ranges: []rune{'a', 'c'}, out: nil},
+		{in: "∝", val: "[a-c]", ranges: []rune{'a', 'c'}, out: nil},
+		{in: "∝", val: "[a-c]i", ic: true, ranges: []rune{'a', 'c'}, out: nil},
+		{in: "∝", val: "[a-c]i", ic: true, ranges: []rune{'a', 'c'}, out: nil},
+		{in: "∝", val: "[^a-c]", ranges: []rune{'a', 'c'}, iv: true, out: []byte("∝")},
+		{in: "∝", val: "[^a-c]", ranges: []rune{'a', 'c'}, iv: true, out: []byte("∝")},
+		{in: "∝", val: "[^a-c]i", iv: true, ic: true, ranges: []rune{'a', 'c'}, out: []byte("∝")},
+		{in: "∝", val: "[^a-c]i", iv: true, ic: true, ranges: []rune{'a', 'c'}, out: []byte("∝")},
+		{in: "∝", val: "[^a-c]i", iv: true, ic: true, chars: []rune{'a', 'c'}, out: []byte("∝")},
+
+		// reversed ranges (hi < lo) never match
+		{in: "b", val: "[c-a]", ranges: []rune{'c', 'a'}, out: nil},
+		{in: "B", val: "[c-a]i", ic: true, ranges: []rune{'c', 'a'}, out: nil},
+		{in: "B", val: "[^c-a]", iv: true, ranges: []rune{'c', 'a'}, out: []byte("B")},
+		{in: "B", val: "[^c-a]i", ic: true, iv: true, ranges: []rune{'c', 'a'}, out: []byte("B")},
+
+		{in: "b", val: "[\\pL]", classes: []string{"L"}, out: []byte("b")},
+		{in: "b", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: []byte("b")},
+		{in: "B", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: []byte("B")},
+		{in: "b", val: "[^\\pL]", iv: true, classes: []string{"L"}, out: nil},
+		{in: "b", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: nil},
+		{in: "B", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: nil},
+
+		{in: "1", val: "[\\pL]", classes: []string{"L"}, out: nil},
+		{in: "1", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: nil},
+		{in: "1", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: nil},
+		{in: "1", val: "[^\\pL]", iv: true, classes: []string{"L"}, out: []byte("1")},
+		{in: "1", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: []byte("1")},
+		{in: "1", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: []byte("1")},
+
+		{in: "ƛ", val: "[\\pL]", classes: []string{"L"}, out: []byte("ƛ")},
+		{in: "ƛ", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: []byte("ƛ")},
+		{in: "ƛ", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: []byte("ƛ")},
+		{in: "ƛ", val: "[^\\pL]", iv: true, classes: []string{"L"}, out: nil},
+		{in: "ƛ", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: nil},
+		{in: "ƛ", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: nil},
+
+		{in: "←a", val: "[\\pL]", classes: []string{"L"}, out: nil},
+		{in: "←a", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: nil},
+		{in: "←a", val: "[\\pL]i", ic: true, classes: []string{"L"}, out: nil},
+		{in: "←a", val: "[^\\pL]", iv: true, classes: []string{"L"}, out: []byte("←")},
+		{in: "←a", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: []byte("←")},
+		{in: "←a", val: "[^\\pL]i", iv: true, ic: true, classes: []string{"L"}, out: []byte("←")},
+
+		{in: "b", val: "[\\p{Latin}]", classes: []string{"Latin"}, out: []byte("b")},
+		{in: "b", val: "[\\p{Latin}]i", ic: true, classes: []string{"Latin"}, out: []byte("b")},
+		{in: "B", val: "[\\p{Latin}]i", ic: true, classes: []string{"Latin"}, out: []byte("B")},
+		{in: "b", val: "[^\\p{Latin}]", iv: true, classes: []string{"Latin"}, out: nil},
+		{in: "b", val: "[^\\p{Latin}]i", ic: true, iv: true, classes: []string{"Latin"}, out: nil},
+		{in: "B", val: "[^\\p{Latin}]i", iv: true, ic: true, classes: []string{"Latin"}, out: nil},
+
+		{in: "", val: "[^<]", iv: true, chars: []rune{'<'}, out: nil},
+	}
+
+	for _, tc := range cases {
+		p := newParser("", []byte(tc.in))
+
+		// advance to the first rune
+		p.read()
+
+		var want interface{}
+		var match bool
+		if tc.out != nil {
+			want = tc.out
+			match = true
+		}
+		lbl := fmt.Sprintf("%q (%t-%t): %q", tc.val, tc.ic, tc.iv, tc.in)
+
+		classes := make([]*unicode.RangeTable, len(tc.classes))
+		for i, c := range tc.classes {
+			classes[i] = rangeTable(c)
+		}
+
+		got, ok := p.parseCharClassMatcher(&charClassMatcher{
+			val:        tc.val,
+			chars:      tc.chars,
+			ranges:     tc.ranges,
+			classes:    classes,
+			ignoreCase: tc.ic,
+			inverted:   tc.iv,
+		})
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%s: want %v, got %v", lbl, tc.out, got)
+		}
+		if ok != match {
+			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
+		}
+		if p.pt.offset != len(tc.out) {
+			t.Errorf("%s: want offset %d, got %d", lbl, len(tc.out), p.pt.offset)
+		}
+	}
+}
+
+// TestParseZeroOrOneExpr verifies that e? always reports a match, returns
+// the inner value (or nil) and consumes input only on an inner match.
+func TestParseZeroOrOneExpr(t *testing.T) {
+	cases := []struct {
+		in  string
+		lit string
+		out []byte
+	}{
+		{"", "", []byte{}},
+		{"", "a", nil},
+		{"a", "a", []byte("a")},
+		{"a", "b", nil},
+		{"abc", "ab", []byte("ab")},
+		{"ab", "abc", nil},
+	}
+
+	for _, tc := range cases {
+		p := newParser("", []byte(tc.in))
+
+		// advance to the first rune
+		p.read()
+
+		var want interface{}
+		if tc.out != nil {
+			want = tc.out
+		}
+		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)
+
+		got, ok := p.parseZeroOrOneExpr(&zeroOrOneExpr{expr: &litMatcher{val: tc.lit}})
+		if !reflect.DeepEqual(got, want) {
+			t.Errorf("%q: want %v, got %v", lbl, tc.out, got)
+		}
+		// zero or one always matches
+		if !ok {
+			t.Errorf("%s: want match, got %t", lbl, ok)
+		}
+		if p.pt.offset != len(tc.out) {
+			t.Errorf("%s: want offset %d, got %d", lbl, len(tc.out), p.pt.offset)
+		}
+	}
+}
+
// TestParseZeroOrMoreExpr exercises the *-operator: it always reports a
// match, collecting as many consecutive occurrences of the literal as
// possible (nil value when there are none at the current position).
func TestParseZeroOrMoreExpr(t *testing.T) {
	cases := []struct {
		in  string   // parser input
		lit string   // literal wrapped by the zero-or-more expression
		out []string // expected matched occurrences, in order
	}{
		// ""* is a pathological case - the empty string always matches, so this
		// is an infinite loop. Not fixing it, because semantically this seems
		// correct.
		// {"", "", []byte{}},

		{"", "a", nil},
		{"a", "a", []string{"a"}},
		{"a", "b", nil},
		{"abc", "ab", []string{"ab"}},
		{"ab", "abc", nil},

		{"aab", "a", []string{"a", "a"}},
		{"bba", "a", nil},
		{"bba", "b", []string{"b", "b"}},
		{"bba", "bb", []string{"bb"}},
		{"aaaaab", "aa", []string{"aa", "aa"}},
		{"aaaaab", "a", []string{"a", "a", "a", "a", "a"}},
	}

	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		// the parser returns a []interface{} of []byte, one per occurrence
		want := make([]interface{}, len(tc.out))
		for i, v := range tc.out {
			want[i] = []byte(v)
		}
		if tc.out == nil {
			want = nil
		}
		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)

		got, ok := p.parseZeroOrMoreExpr(&zeroOrMoreExpr{expr: &litMatcher{val: tc.lit}})
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s: want %#v, got %#v", lbl, want, got)
		}
		// zero or more always matches
		if !ok {
			t.Errorf("%s: want match, got %t", lbl, ok)
		}
		// the parser must have consumed exactly the matched occurrences
		wantOffset := 0
		for _, s := range tc.out {
			wantOffset += len(s)
		}
		if p.pt.offset != wantOffset {
			t.Errorf("%s: want offset %d, got %d", lbl, wantOffset, p.pt.offset)
		}
	}
}
+
// TestParseOneOrMoreExpr exercises the +-operator: at least one
// occurrence of the literal is required for a match, and all consecutive
// occurrences are collected.
func TestParseOneOrMoreExpr(t *testing.T) {
	cases := []struct {
		in  string   // parser input
		lit string   // literal wrapped by the one-or-more expression
		out []string // expected matched occurrences (nil: no match)
	}{
		// ""+ is a pathological case - the empty string always matches, so this
		// is an infinite loop. Not fixing it, because semantically this seems
		// correct.
		//{"", "", []string{}},

		{"", "a", nil},
		{"a", "a", []string{"a"}},
		{"a", "b", nil},
		{"abc", "ab", []string{"ab"}},
		{"ab", "abc", nil},

		{"aab", "a", []string{"a", "a"}},
		{"bba", "a", nil},
		{"bba", "b", []string{"b", "b"}},
		{"bba", "bb", []string{"bb"}},
		{"aaaaab", "aa", []string{"aa", "aa"}},
		{"aaaaab", "a", []string{"a", "a", "a", "a", "a"}},
	}

	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		var want interface{}
		var match bool
		if tc.out != nil {
			vals := make([]interface{}, len(tc.out))
			for i, v := range tc.out {
				vals[i] = []byte(v)
			}
			want = vals
			match = true
		}
		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)

		got, ok := p.parseOneOrMoreExpr(&oneOrMoreExpr{expr: &litMatcher{val: tc.lit}})
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s: want %#v, got %#v", lbl, want, got)
		}
		if ok != match {
			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
		}
		// the parser must have consumed exactly the matched occurrences
		wantOffset := 0
		for _, s := range tc.out {
			wantOffset += len(s)
		}
		if p.pt.offset != wantOffset {
			t.Errorf("%s: want offset %d, got %d", lbl, wantOffset, p.pt.offset)
		}
	}
}
+
// TestParseSeqExpr verifies sequence expressions: every literal must
// match in order; on failure the starting position is restored (offset 0).
func TestParseSeqExpr(t *testing.T) {
	cases := []struct {
		in   string   // parser input
		lits []string // literals making up the sequence
		out  []string // expected matched parts (nil: no match)
	}{
		{"", nil, []string{}}, // empty seq (impossible case via the parser) always matches
		{"", []string{"a"}, nil},
		{"a", []string{"a"}, []string{"a"}},
		{"a", []string{"a", "b"}, nil},
		{"abc", []string{"a", "b"}, []string{"a", "b"}},
		{"abc", []string{"a", "b", "c"}, []string{"a", "b", "c"}},
		{"ab", []string{"a", "b", "c"}, nil},
	}

	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		var want interface{}
		var match bool
		if tc.out != nil {
			var vals []interface{}
			for _, v := range tc.out {
				vals = append(vals, []byte(v))
			}
			want = vals
			match = true
		}
		lbl := fmt.Sprintf("%v: %q", tc.lits, tc.in)

		lits := make([]interface{}, len(tc.lits))
		for i, l := range tc.lits {
			lits[i] = &litMatcher{val: l}
		}

		got, ok := p.parseSeqExpr(&seqExpr{exprs: lits})
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s: want %#v, got %#v", lbl, want, got)
		}
		if ok != match {
			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
		}
		// offset equals the total length of the matched parts (0 on failure)
		wantOffset := 0
		for _, s := range tc.out {
			wantOffset += len(s)
		}
		if p.pt.offset != wantOffset {
			t.Errorf("%s: want offset %d, got %d", lbl, wantOffset, p.pt.offset)
		}
	}
}
+
+func TestParseRuleRefExpr(t *testing.T) {
+ p := newParser("", []byte(""))
+
+ func() {
+ defer func() {
+ if e := recover(); e != nil {
+ return
+ }
+ t.Fatal("want panic, got none")
+ }()
+ p.parseRuleRefExpr(&ruleRefExpr{})
+ }()
+
+ p.parseRuleRefExpr(&ruleRefExpr{name: "a"})
+ if p.errs.err() == nil {
+ t.Fatal("want error, got none")
+ }
+}
+
// TestParseNotExpr verifies the !e negative lookahead: the match result
// is the negation of the inner literal's match, and no input is consumed.
func TestParseNotExpr(t *testing.T) {
	cases := []struct {
		in    string // parser input
		lit   string // literal inside the not-expression
		match bool   // expected (negated) match result
	}{
		{"", "", false},
		{"", "a", true},
		{"a", "a", false},
		{"b", "a", true},
		{"ab", "a", false},
		{"ab", "ab", false},
		{"ab", "abc", true},
		{"abc", "abc", false},
		{"abc", "ab", false},
		{"abc", "ac", true},
	}
	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)

		_, ok := p.parseNotExpr(&notExpr{expr: &litMatcher{val: tc.lit}})
		if ok != tc.match {
			t.Errorf("%s: want match? %t, got %t", lbl, tc.match, ok)
		}
		// lookahead never consumes input
		if p.pt.offset != 0 {
			t.Errorf("%s: want offset %d, got %d", lbl, 0, p.pt.offset)
		}
	}
}
+
// TestParseAndExpr verifies the &e positive lookahead: the match result
// mirrors the inner literal's match, and no input is consumed.
func TestParseAndExpr(t *testing.T) {
	cases := []struct {
		in    string // parser input
		lit   string // literal inside the and-expression
		match bool   // expected match result
	}{
		{"", "", true},
		{"", "a", false},
		{"a", "a", true},
		{"b", "a", false},
		{"ab", "a", true},
		{"ab", "ab", true},
		{"ab", "abc", false},
		{"abc", "abc", true},
		{"abc", "ab", true},
		{"abc", "ac", false},
	}
	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)

		_, ok := p.parseAndExpr(&andExpr{expr: &litMatcher{val: tc.lit}})
		if ok != tc.match {
			t.Errorf("%s: want match? %t, got %t", lbl, tc.match, ok)
		}
		// lookahead never consumes input
		if p.pt.offset != 0 {
			t.Errorf("%s: want offset %d, got %d", lbl, 0, p.pt.offset)
		}
	}
}
+
// TestParseNotCodeExpr verifies the !{...} predicate: the match is the
// negation of the code block's boolean, a returned error is recorded as a
// parserError, and no input is consumed.
func TestParseNotCodeExpr(t *testing.T) {
	cases := []struct {
		in  string // parser input
		b   bool   // boolean returned by the predicate code
		err error  // error returned by the predicate code
	}{
		{"", true, nil},
		{"", true, io.EOF},
		{"", false, nil},
		{"", false, io.EOF},
		{"a", true, nil},
		{"a", true, io.EOF},
		{"a", false, nil},
		{"a", false, io.EOF},
	}

	for _, tc := range cases {
		fn := func(_ *parser) (bool, error) {
			return tc.b, tc.err
		}
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		lbl := fmt.Sprintf("%q: %t-%t", tc.in, tc.b, tc.err == nil)

		_, ok := p.parseNotCodeExpr(&notCodeExpr{run: fn})
		if ok != !tc.b {
			t.Errorf("%s: want match? %t, got %t", lbl, !tc.b, ok)
		}

		// exactly one error must be recorded when the code returned one,
		// and it must wrap the original error in its Inner field
		el := *p.errs
		wantn := 0
		if tc.err != nil {
			wantn = 1
		}
		if len(el) != wantn {
			t.Errorf("%s: want %d error, got %d", lbl, wantn, len(el))
		} else if wantn == 1 {
			ie := el[0].(*parserError).Inner
			if ie != tc.err {
				t.Errorf("%s: want error %v, got %v", lbl, tc.err, ie)
			}
		}

		// predicates never consume input
		if p.pt.offset != 0 {
			t.Errorf("%s: want offset %d, got %d", lbl, 0, p.pt.offset)
		}
	}
}
+
// TestParseAndCodeExpr verifies the &{...} predicate: the match mirrors
// the code block's boolean, a returned error is recorded as a
// parserError, and no input is consumed.
func TestParseAndCodeExpr(t *testing.T) {
	cases := []struct {
		in  string // parser input
		b   bool   // boolean returned by the predicate code
		err error  // error returned by the predicate code
	}{
		{"", true, nil},
		{"", true, io.EOF},
		{"", false, nil},
		{"", false, io.EOF},
		{"a", true, nil},
		{"a", true, io.EOF},
		{"a", false, nil},
		{"a", false, io.EOF},
	}

	for _, tc := range cases {
		fn := func(_ *parser) (bool, error) {
			return tc.b, tc.err
		}
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		lbl := fmt.Sprintf("%q: %t-%t", tc.in, tc.b, tc.err == nil)

		_, ok := p.parseAndCodeExpr(&andCodeExpr{run: fn})
		if ok != tc.b {
			t.Errorf("%s: want match? %t, got %t", lbl, tc.b, ok)
		}

		// exactly one error must be recorded when the code returned one,
		// and it must wrap the original error in its Inner field
		el := *p.errs
		wantn := 0
		if tc.err != nil {
			wantn = 1
		}
		if len(el) != wantn {
			t.Errorf("%s: want %d error, got %d", lbl, wantn, len(el))
		} else if wantn == 1 {
			ie := el[0].(*parserError).Inner
			if ie != tc.err {
				t.Errorf("%s: want error %v, got %v", lbl, tc.err, ie)
			}
		}

		// predicates never consume input
		if p.pt.offset != 0 {
			t.Errorf("%s: want offset %d, got %d", lbl, 0, p.pt.offset)
		}
	}
}
+
// TestParseLabeledExpr verifies that a labeled expression stores the
// matched value under its label in the variable set on top of the vstack.
func TestParseLabeledExpr(t *testing.T) {
	cases := []struct {
		in  string // parser input
		lit string // literal wrapped by the labeled expression
		out []byte // expected matched bytes (nil: no match)
	}{
		{"", "", []byte{}},
		{"", "a", nil},
		{"a", "a", []byte("a")},
		{"a", "ab", nil},
		{"ab", "a", []byte("a")},
		{"ab", "ab", []byte("ab")},
		{"ab", "abc", nil},
		{"abc", "ab", []byte("ab")},
	}

	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune, and push the variable set the label
		// will be stored into (normally done by parseRule)
		p.read()
		p.pushV()

		var want interface{}
		var match bool
		if tc.out != nil {
			match = true
			want = tc.out
		}
		lbl := fmt.Sprintf("%q: %q", tc.lit, tc.in)

		got, ok := p.parseLabeledExpr(&labeledExpr{label: "l", expr: &litMatcher{val: tc.lit}})
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s: want %v, got %v", lbl, tc.out, got)
		}
		if ok != match {
			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
		} else {
			// must be 1 var set on the stack
			if len(p.vstack) != 1 {
				t.Errorf("%s: want %d var sets on the stack, got %d", lbl, 1, len(p.vstack))
			} else {
				vs := p.vstack[0]
				if !reflect.DeepEqual(vs["l"], got) {
					t.Errorf("%s: want %v on the stack for this label, got %v", lbl, got, vs["l"])
				}
			}
		}

		if p.pt.offset != len(tc.out) {
			t.Errorf("%s: want offset %d, got %d", lbl, len(tc.out), p.pt.offset)
		}
	}
}
+
// TestParseChoiceExpr verifies ordered choice: alternatives are tried in
// order and the first literal that matches wins.
func TestParseChoiceExpr(t *testing.T) {
	cases := []struct {
		in   string   // parser input
		lits []string // alternatives, tried in order
		out  []byte   // expected matched bytes (nil: no alternative matched)
	}{
		{"", nil, nil}, // empty choice (impossible case via the parser)

		{"", []string{"a"}, nil},
		{"a", []string{"a"}, []byte("a")},
		{"a", []string{"b"}, nil},
		{"ab", []string{"b"}, nil},
		{"ba", []string{"b"}, []byte("b")},
		{"a", []string{"a", "b"}, []byte("a")},
		{"a", []string{"b", "a"}, []byte("a")},
		{"ab", []string{"a", "b"}, []byte("a")},
		{"ab", []string{"b", "a"}, []byte("a")},
		{"cb", []string{"a", "b"}, nil},
		{"cb", []string{"b", "a"}, nil},
		{"abcd", []string{"abc", "ab", "a"}, []byte("abc")},
		{"abcd", []string{"a", "ab", "abc"}, []byte("a")}, // first match wins, not longest
		{"bcd", []string{"a", "ab", "abc"}, nil},
	}

	for _, tc := range cases {
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		var want interface{}
		var match bool
		if tc.out != nil {
			want = tc.out
			match = true
		}
		lbl := fmt.Sprintf("%v: %q", tc.lits, tc.in)

		lits := make([]interface{}, len(tc.lits))
		for i, l := range tc.lits {
			lits[i] = &litMatcher{val: l}
		}

		got, ok := p.parseChoiceExpr(&choiceExpr{alternatives: lits})
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s: want %#v, got %#v", lbl, want, got)
		}
		if ok != match {
			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
		}
		if p.pt.offset != len(tc.out) {
			t.Errorf("%s: want offset %d, got %d", lbl, len(tc.out), p.pt.offset)
		}
	}
}
+
// TestParseActionExpr verifies action expressions: the code block runs
// only when the inner literal matches, its return value replaces the
// matched value, and a returned error is recorded as a parserError.
func TestParseActionExpr(t *testing.T) {
	cases := []struct {
		in  string      // parser input
		lit string      // literal wrapped by the action expression
		v   interface{} // value returned by the action (nil: no match expected)
		err error       // error returned by the action
	}{
		{"", "", 1, nil}, // empty string always matches
		{"", "", 1, io.EOF},
		{"", "a", nil, nil},
		{"a", "a", 1, nil},
		{"a", "a", 1, io.EOF},
		{"ab", "a", 1, nil},
		{"ab", "a", 1, io.EOF},
		{"ba", "a", nil, nil},
	}

	for _, tc := range cases {
		called := false
		fn := func(_ *parser) (interface{}, error) {
			called = true
			return tc.v, tc.err
		}
		p := newParser("", []byte(tc.in))

		// advance to the first rune
		p.read()

		lbl := fmt.Sprintf("%q: %q", tc.in, tc.lit)

		match := tc.v != nil

		got, ok := p.parseActionExpr(&actionExpr{run: fn, expr: &litMatcher{val: tc.lit}})
		if ok != match {
			t.Errorf("%s: want match? %t, got %t", lbl, match, ok)
		}
		if !reflect.DeepEqual(got, tc.v) {
			t.Errorf("%s: want %#v, got %#v", lbl, tc.v, got)
		}
		// the action code must run exactly when the inner expr matched
		if match != called {
			t.Errorf("%s: want action code to be called? %t, got %t", lbl, match, called)
		}

		// a non-nil action error must surface as a single parserError
		el := *p.errs
		wantn := 0
		if tc.err != nil {
			wantn = 1
		}
		if len(el) != wantn {
			t.Errorf("%s: want %d error, got %d", lbl, wantn, len(el))
		} else if wantn == 1 {
			ie := el[0].(*parserError).Inner
			if ie != tc.err {
				t.Errorf("%s: want error %v, got %v", lbl, tc.err, ie)
			}
		}

		// on a match the literal is consumed; otherwise position is restored
		wantOffset := 0
		if match {
			wantOffset = len(tc.lit)
		}
		if p.pt.offset != wantOffset {
			t.Errorf("%s: want offset %d, got %d", lbl, wantOffset, p.pt.offset)
		}
	}
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.go b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.go
new file mode 100644
index 0000000000..ef2cff1418
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.go
@@ -0,0 +1,1038 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+func main() {
+ in := os.Stdin
+ if len(os.Args) > 1 {
+ f, err := os.Open(os.Args[1])
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer f.Close()
+ in = f
+ }
+ got, err := ParseReader("", in)
+ fmt.Println(got, err)
+}
+
// toString flattens the []interface{} produced by the parser — where each
// element is the []byte matched by one repetition — into a single string.
// A bytes.Buffer keeps the concatenation linear instead of the quadratic
// cost of repeated string "+=" (the file already imports bytes).
// Panics, as before, if v or its elements have a different dynamic type.
func toString(v interface{}) string {
	ifSl := v.([]interface{})
	var buf bytes.Buffer
	for _, s := range ifSl {
		buf.Write(s.([]byte))
	}
	return buf.String()
}
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "Input",
+ pos: position{line: 28, col: 1, offset: 406},
+ expr: &seqExpr{
+ pos: position{line: 28, col: 9, offset: 416},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 28, col: 9, offset: 416},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 11, offset: 418},
+ name: "AB",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 14, offset: 421},
+ name: "_",
+ },
+ &ruleRefExpr{
+ pos: position{line: 28, col: 16, offset: 423},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ {
+ name: "AB",
+ pos: position{line: 30, col: 1, offset: 428},
+ expr: &choiceExpr{
+ pos: position{line: 30, col: 6, offset: 435},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 30, col: 6, offset: 435},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 30, col: 6, offset: 435},
+ label: "abees",
+ expr: &oneOrMoreExpr{
+ pos: position{line: 30, col: 12, offset: 441},
+ expr: &charClassMatcher{
+ pos: position{line: 30, col: 12, offset: 441},
+ val: "[ab]",
+ chars: []rune{'a', 'b'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ &andCodeExpr{
+ pos: position{line: 30, col: 18, offset: 447},
+ run: (*parser).callonAB6,
+ },
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 30, col: 77, offset: 506},
+ name: "CD",
+ },
+ },
+ },
+ },
+ {
+ name: "CD",
+ pos: position{line: 31, col: 1, offset: 509},
+ expr: &seqExpr{
+ pos: position{line: 31, col: 6, offset: 516},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 31, col: 6, offset: 516},
+ label: "ceedees",
+ expr: &oneOrMoreExpr{
+ pos: position{line: 31, col: 14, offset: 524},
+ expr: &charClassMatcher{
+ pos: position{line: 31, col: 14, offset: 524},
+ val: "[cd]",
+ chars: []rune{'c', 'd'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ ¬CodeExpr{
+ pos: position{line: 31, col: 20, offset: 530},
+ run: (*parser).callonCD5,
+ },
+ },
+ },
+ },
+ {
+ name: "_",
+ pos: position{line: 33, col: 1, offset: 590},
+ expr: &zeroOrMoreExpr{
+ pos: position{line: 33, col: 5, offset: 596},
+ expr: &charClassMatcher{
+ pos: position{line: 33, col: 5, offset: 596},
+ val: "[ \\t\\n\\r]",
+ chars: []rune{' ', '\t', '\n', '\r'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 34, col: 1, offset: 607},
+ expr: ¬Expr{
+ pos: position{line: 34, col: 7, offset: 615},
+ expr: &anyMatcher{
+ line: 34, col: 8, offset: 616,
+ },
+ },
+ },
+ },
+}
+
// onAB6 is the &{...} predicate of rule AB: the run of [ab] must end in "b".
func (c *current) onAB6(abees interface{}) (bool, error) {
	return strings.HasSuffix(toString(abees), "b"), nil
}

// callonAB6 bridges the parser to onAB6, passing the value bound to the
// "abees" label on the current variable set.
func (p *parser) callonAB6() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onAB6(stack["abees"])
}

// onCD5 is the !{...} predicate of rule CD: the match fails when the run
// of [cd] ends in "c" (i.e. CDs must end in "d").
func (c *current) onCD5(ceedees interface{}) (bool, error) {
	return strings.HasSuffix(toString(ceedees), "c"), nil
}

// callonCD5 bridges the parser to onCD5, passing the value bound to the
// "ceedees" label on the current variable set.
func (p *parser) callonCD5() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onCD5(stack["ceedees"])
}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
// ParseFile parses the file identified by filename.
func ParseFile(filename string, opts ...Option) (interface{}, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ParseReader(filename, f, opts...)
}

// ParseReader parses the data from r using filename as information in the
// error messages. The reader is consumed in full before parsing starts.
func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	return Parse(filename, b, opts...)
}

// Parse parses the data from b using filename as information in the
// error messages.
func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
	return newParser(filename, b, opts...).parse(g)
}
+
// position records a position in the text.
type position struct {
	line, col, offset int
}

// String formats the position as "line:col [offset]".
func (p position) String() string {
	return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
}

// savepoint stores all state required to go back to this point in the
// parser.
type savepoint struct {
	position
	rn rune // current rune
	w  int  // width of the current rune in bytes
}

type current struct {
	pos  position // start position of the match
	text []byte   // raw text of the match
}
+
// the AST types...

// grammar is the root of the parsed grammar: an ordered list of rules,
// the first of which is the start rule.
type grammar struct {
	pos   position
	rules []*rule
}

// rule is a single named grammar rule.
type rule struct {
	pos         position
	name        string
	displayName string
	expr        interface{}
}

// choiceExpr is an ordered choice (e1 / e2 / ...).
type choiceExpr struct {
	pos          position
	alternatives []interface{}
}

// actionExpr wraps an expression with a code block run on a match.
type actionExpr struct {
	pos  position
	expr interface{}
	run  func(*parser) (interface{}, error)
}

// seqExpr is a sequence of expressions that must all match in order.
type seqExpr struct {
	pos   position
	exprs []interface{}
}

// labeledExpr binds the value of expr to label for use by action code.
type labeledExpr struct {
	pos   position
	label string
	expr  interface{}
}

// expr is the common shape shared by the single-operand expressions below.
type expr struct {
	pos  position
	expr interface{}
}

type andExpr expr        // &e : positive lookahead
type notExpr expr        // !e : negative lookahead
type zeroOrOneExpr expr  // e?
type zeroOrMoreExpr expr // e*
type oneOrMoreExpr expr  // e+

// ruleRefExpr references another rule by name.
type ruleRefExpr struct {
	pos  position
	name string
}

// andCodeExpr is a &{...} predicate: run decides the match.
type andCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// notCodeExpr is a !{...} predicate: the match is the negation of run.
type notCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// litMatcher matches a literal string, optionally case-insensitively.
type litMatcher struct {
	pos        position
	val        string
	ignoreCase bool
}

// charClassMatcher matches a single rune against explicit chars, inclusive
// range pairs and Unicode classes; inverted flips the result.
type charClassMatcher struct {
	pos        position
	val        string
	chars      []rune
	ranges     []rune
	classes    []*unicode.RangeTable
	ignoreCase bool
	inverted   bool
}

// anyMatcher matches any single rune (the '.' expression).
type anyMatcher position
+
// errList cumulates the errors found by the parser.
type errList []error

// add appends err to the list.
func (e *errList) add(err error) {
	*e = append(*e, err)
}

// err returns the deduplicated list as an error value, or nil when the
// list is empty.
func (e errList) err() error {
	if len(e) == 0 {
		return nil
	}
	e.dedupe()
	return e
}
+
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
	Inner  error    // the underlying error
	pos    position // position where the error occurred
	prefix string   // "file:line:col (offset): rule ..." context
}

// Error returns the error message.
func (p *parserError) Error() string {
	return p.prefix + ": " + p.Inner.Error()
}
+
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
	p := &parser{
		filename: filename,
		errs:     new(errList),
		data:     b,
		pt:       savepoint{position: position{line: 1}},
		recover:  true, // by default, panics in action code become errors
	}
	p.setOptions(opts)
	return p
}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
// resultTuple is the memoized outcome of evaluating an expression at a
// given position: its value, whether it matched, and the end savepoint.
type resultTuple struct {
	v   interface{}
	b   bool
	end savepoint
}

// parser holds the complete state of a parse: the input, current
// position, accumulated errors and the option flags.
type parser struct {
	filename string
	pt       savepoint // current position/rune
	cur      current   // match context exposed to action code

	data []byte
	errs *errList

	recover bool
	debug   bool
	depth   int // indentation depth for debug traces

	memoize bool
	// memoization table for the packrat algorithm:
	// map[offset in source] map[expression or rule] {value, match}
	memo map[int]map[interface{}]resultTuple

	// rules table, maps the rule identifier to the rule node
	rules map[string]*rule
	// variables stack, map of label to value
	vstack []map[string]interface{}
	// rule stack, allows identification of the current rule in errors
	rstack []*rule

	// stats
	exprCnt int
}
+
// push a variable set on the vstack.
func (p *parser) pushV() {
	if cap(p.vstack) == len(p.vstack) {
		// create new empty slot in the stack
		p.vstack = append(p.vstack, nil)
	} else {
		// slice to 1 more
		p.vstack = p.vstack[:len(p.vstack)+1]
	}

	// get the last args set
	m := p.vstack[len(p.vstack)-1]
	if m != nil && len(m) == 0 {
		// empty map, all good
		return
	}

	// the slot holds nil (or a non-empty map from a previous use): install
	// a fresh empty map so labels from other scopes cannot leak in
	m = make(map[string]interface{})
	p.vstack[len(p.vstack)-1] = m
}

// pop a variable set from the vstack.
func (p *parser) popV() {
	// if the map is not empty, clear it
	m := p.vstack[len(p.vstack)-1]
	if len(m) > 0 {
		// GC that map
		p.vstack[len(p.vstack)-1] = nil
	}
	p.vstack = p.vstack[:len(p.vstack)-1]
}
+
// print emits one debug trace line (position, message, current rune) when
// the debug option is on; it returns s unchanged so calls can be chained.
func (p *parser) print(prefix, s string) string {
	if !p.debug {
		return s
	}

	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
	return s
}

// in traces entry into a parse function, increasing the indent depth.
func (p *parser) in(s string) string {
	p.depth++
	return p.print(strings.Repeat(" ", p.depth)+">", s)
}

// out traces exit from a parse function, decreasing the indent depth.
func (p *parser) out(s string) string {
	p.depth--
	return p.print(strings.Repeat(" ", p.depth)+"<", s)
}
+
// addErr records err at the current parser position.
func (p *parser) addErr(err error) {
	p.addErrAt(err, p.pt.position)
}

// addErrAt wraps err in a parserError whose prefix identifies the file,
// the position and — when a rule is being parsed — the rule name, then
// appends it to the parser's error list.
func (p *parser) addErrAt(err error, pos position) {
	var buf bytes.Buffer
	if p.filename != "" {
		buf.WriteString(p.filename)
	}
	if buf.Len() > 0 {
		buf.WriteString(":")
	}
	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
	if len(p.rstack) > 0 {
		if buf.Len() > 0 {
			buf.WriteString(": ")
		}
		// the innermost rule is the most relevant to the error
		rule := p.rstack[len(p.rstack)-1]
		if rule.displayName != "" {
			buf.WriteString("rule " + rule.displayName)
		} else {
			buf.WriteString("rule " + rule.name)
		}
	}
	pe := &parserError{Inner: err, prefix: buf.String()}
	p.errs.add(pe)
}
+
+// read advances the parser to the next rune.
+func (p *parser) read() {
+ p.pt.offset += p.pt.w
+ rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+ p.pt.rn = rn
+ p.pt.w = n
+ p.pt.col++
+ if rn == '\n' {
+ p.pt.line++
+ p.pt.col = 0
+ }
+
+ if rn == utf8.RuneError {
+ if n > 0 {
+ p.addErr(errInvalidEncoding)
+ }
+ }
+}
+
// restore parser position to the savepoint pt.
func (p *parser) restore(pt savepoint) {
	if p.debug {
		defer p.out(p.in("restore"))
	}
	if pt.offset == p.pt.offset {
		// already at that position, avoid copying the savepoint
		return
	}
	p.pt = pt
}

// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
	return p.data[start.position.offset:p.pt.position.offset]
}
+
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
// setMemoized records the result of evaluating node at savepoint pt,
// lazily allocating the two-level memoization table.
func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
	if p.memo == nil {
		p.memo = make(map[int]map[interface{}]resultTuple)
	}
	m := p.memo[pt.offset]
	if m == nil {
		m = make(map[interface{}]resultTuple)
		p.memo[pt.offset] = m
	}
	m[node] = tuple
}
+
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
// parse runs the grammar against the parser's input, starting at the
// first rule. With the recover option on (the default), panics raised in
// action code are converted into regular parse errors.
func (p *parser) parse(g *grammar) (val interface{}, err error) {
	if len(g.rules) == 0 {
		p.addErr(errNoRule)
		return nil, p.errs.err()
	}

	// TODO : not super critical but this could be generated
	p.buildRulesTable(g)

	if p.recover {
		// panic can be used in action code to stop parsing immediately
		// and return the panic as an error.
		defer func() {
			if e := recover(); e != nil {
				if p.debug {
					defer p.out(p.in("panic handler"))
				}
				val = nil
				switch e := e.(type) {
				case error:
					p.addErr(e)
				default:
					p.addErr(fmt.Errorf("%v", e))
				}
				err = p.errs.err()
			}
		}()
	}

	// start rule is rule [0]
	p.read() // advance to first rune
	val, ok := p.parseRule(g.rules[0])
	if !ok {
		if len(*p.errs) == 0 {
			// make sure this doesn't go out silently
			p.addErr(errNoMatch)
		}
		return nil, p.errs.err()
	}
	return val, p.errs.err()
}
+
// parseRule evaluates a rule's expression, maintaining the rule stack
// (for error messages) and the memoization table when enabled.
func (p *parser) parseRule(rule *rule) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRule " + rule.name))
	}

	if p.memoize {
		res, ok := p.getMemoized(rule)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
	}

	start := p.pt
	p.rstack = append(p.rstack, rule)
	p.pushV()
	val, ok := p.parseExpr(rule.expr)
	p.popV()
	p.rstack = p.rstack[:len(p.rstack)-1]
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}

	if p.memoize {
		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
// parseExpr dispatches to the parse function matching the expression's
// concrete type, consulting and updating the memoization table when the
// memoize option is on.
func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
	var pt savepoint
	var ok bool

	if p.memoize {
		// note: this ok deliberately shadows the outer one; the outer ok
		// is assigned by the type switch below
		res, ok := p.getMemoized(expr)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
		pt = p.pt
	}

	p.exprCnt++
	var val interface{}
	switch expr := expr.(type) {
	case *actionExpr:
		val, ok = p.parseActionExpr(expr)
	case *andCodeExpr:
		val, ok = p.parseAndCodeExpr(expr)
	case *andExpr:
		val, ok = p.parseAndExpr(expr)
	case *anyMatcher:
		val, ok = p.parseAnyMatcher(expr)
	case *charClassMatcher:
		val, ok = p.parseCharClassMatcher(expr)
	case *choiceExpr:
		val, ok = p.parseChoiceExpr(expr)
	case *labeledExpr:
		val, ok = p.parseLabeledExpr(expr)
	case *litMatcher:
		val, ok = p.parseLitMatcher(expr)
	case *notCodeExpr:
		val, ok = p.parseNotCodeExpr(expr)
	case *notExpr:
		val, ok = p.parseNotExpr(expr)
	case *oneOrMoreExpr:
		val, ok = p.parseOneOrMoreExpr(expr)
	case *ruleRefExpr:
		val, ok = p.parseRuleRefExpr(expr)
	case *seqExpr:
		val, ok = p.parseSeqExpr(expr)
	case *zeroOrMoreExpr:
		val, ok = p.parseZeroOrMoreExpr(expr)
	case *zeroOrOneExpr:
		val, ok = p.parseZeroOrOneExpr(expr)
	default:
		panic(fmt.Sprintf("unknown expression type %T", expr))
	}
	if p.memoize {
		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
// parseActionExpr evaluates the inner expression and, on a match, runs
// the associated action code with p.cur set to the matched position and
// text; the action's return value replaces the expression's value.
func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseActionExpr"))
	}

	start := p.pt
	val, ok := p.parseExpr(act.expr)
	if ok {
		p.cur.pos = start.position
		p.cur.text = p.sliceFrom(start)
		actVal, err := act.run(p)
		if err != nil {
			// report the error at the start of the match
			p.addErrAt(err, start.position)
		}
		val = actVal
	}
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}
	return val, ok
}
+
// parseAndCodeExpr evaluates a &{...} predicate: the code block alone
// decides the match and no input is consumed.
func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndCodeExpr"))
	}

	ok, err := and.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, ok
}
+
// parseAndExpr evaluates a &e positive lookahead: the inner expression is
// tried and the position then restored, so no input is consumed.
func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndExpr"))
	}

	pt := p.pt
	p.pushV()
	_, ok := p.parseExpr(and.expr)
	p.popV()
	p.restore(pt)
	return nil, ok
}
+
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
// parseCharClassMatcher matches the current rune against the character
// class: explicit chars, inclusive range pairs, then Unicode classes.
// When inverted is set, a hit fails the match and a complete miss
// consumes the rune instead.
func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseCharClassMatcher"))
	}

	cur := p.pt.rn
	// can't match EOF
	if cur == utf8.RuneError {
		return nil, false
	}
	start := p.pt
	if chr.ignoreCase {
		cur = unicode.ToLower(cur)
	}

	// try to match in the list of available chars
	for _, rn := range chr.chars {
		if rn == cur {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of ranges
	for i := 0; i < len(chr.ranges); i += 2 {
		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of Unicode classes
	for _, cl := range chr.classes {
		if unicode.Is(cl, cur) {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// nothing matched: for an inverted class that IS the match
	if chr.inverted {
		p.read()
		return p.sliceFrom(start), true
	}
	return nil, false
}
+
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
// parseLabeledExpr evaluates the inner expression and, on a match, binds
// its value to the label in the current variable set so action and
// predicate code can reference it.
func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLabeledExpr"))
	}

	p.pushV()
	val, ok := p.parseExpr(lab.expr)
	p.popV()
	if ok && lab.label != "" {
		m := p.vstack[len(p.vstack)-1]
		m[lab.label] = val
	}
	return val, ok
}
+
// parseLitMatcher matches the literal string rune by rune, lowering the
// input rune when ignoreCase is set; on failure the starting position is
// restored. The empty literal always matches without consuming input.
func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLitMatcher"))
	}

	start := p.pt
	for _, want := range lit.val {
		cur := p.pt.rn
		if lit.ignoreCase {
			cur = unicode.ToLower(cur)
		}
		if cur != want {
			p.restore(start)
			return nil, false
		}
		p.read()
	}
	return p.sliceFrom(start), true
}
+
// parseNotCodeExpr evaluates a !{...} predicate: the match is the
// negation of the code block's result and no input is consumed.
func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotCodeExpr"))
	}

	ok, err := not.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, !ok
}
+
// parseNotExpr evaluates a !e negative lookahead: the inner expression is
// tried, the position restored, and the match result negated.
func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotExpr"))
	}

	pt := p.pt
	p.pushV()
	_, ok := p.parseExpr(not.expr)
	p.popV()
	p.restore(pt)
	return nil, !ok
}
+
// parseOneOrMoreExpr parses e+: the inner expression must match at least
// once, and all consecutive matches are collected into a []interface{}.
func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseOneOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			if len(vals) == 0 {
				// did not match once, no match
				return nil, false
			}
			return vals, true
		}
		vals = append(vals, val)
	}
}
+
// parseRuleRefExpr resolves a rule reference by name and parses the
// referenced rule. A missing name is a generator bug and panics; an
// unknown name records an error and fails the match.
func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRuleRefExpr " + ref.name))
	}

	if ref.name == "" {
		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
	}

	rule := p.rules[ref.name]
	if rule == nil {
		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
		return nil, false
	}
	return p.parseRule(rule)
}
+
// parseSeqExpr parses a sequence: every sub-expression must match in
// order; on the first failure the starting position is restored.
func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseSeqExpr"))
	}

	var vals []interface{}

	pt := p.pt
	for _, expr := range seq.exprs {
		val, ok := p.parseExpr(expr)
		if !ok {
			p.restore(pt)
			return nil, false
		}
		vals = append(vals, val)
	}
	return vals, true
}
+
// parseZeroOrMoreExpr parses e*: it always matches, collecting zero or
// more consecutive matches of the inner expression.
func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			// vals may be nil when nothing matched; still a match
			return vals, true
		}
		vals = append(vals, val)
	}
}
+
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.peg b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.peg
new file mode 100644
index 0000000000..90d0557df4
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot.peg
@@ -0,0 +1,35 @@
+{
+package main
+
+// main reads input from stdin, or from the file named by the first CLI
+// argument, and prints the parse result and error.
+func main() {
+	in := os.Stdin
+	if len(os.Args) > 1 {
+		f, err := os.Open(os.Args[1])
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer f.Close()
+		in = f
+	}
+	got, err := ParseReader("", in)
+	fmt.Println(got, err)
+}
+
+// toString concatenates a repeated char-class match (a []interface{}
+// of []byte slices, as produced by the generated parser) into a string.
+func toString(v interface{}) string {
+	ifSl := v.([]interface{})
+	var res string
+	for _, s := range ifSl {
+		res += string(s.([]byte))
+	}
+	return res
+}
+}
+
+Input ← _ AB _ EOF
+
+AB ← abees:[ab]+ &{ return strings.HasSuffix(toString(abees), "b"), nil } / CD
+CD ← ceedees:[cd]+ !{ return strings.HasSuffix(toString(ceedees), "c"), nil }
+
+_ ← [ \t\n\r]*
+EOF ← !.
+
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot_test.go b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot_test.go
new file mode 100644
index 0000000000..1f77d2f5cd
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/andnot/andnot_test.go
@@ -0,0 +1,36 @@
+package main
+
+import "testing"
+
+// cases maps an input string to the expected parse error ("" means the
+// input must parse). ABs must end in Bs, CDs must end in Ds: the
+// grammar's &{...} predicate accepts an [ab]+ run only when it ends in
+// "b", and the !{...} predicate rejects a [cd]+ run ending in "c".
+var cases = map[string]string{
+	"":             "1:1 (0): no match found",
+	"a":            "1:1 (0): no match found",
+	"b":            "",
+	"ab":           "",
+	"ba":           "1:1 (0): no match found",
+	"aab":          "",
+	"bba":          "1:1 (0): no match found",
+	"aabbaba":      "1:1 (0): no match found",
+	"bbaabaaabbbb": "",
+	"abc":          "1:1 (0): no match found",
+	"c":            "1:1 (0): no match found",
+	"d":            "",
+	"cd":           "",
+	"dc":           "1:1 (0): no match found",
+	"dcddcc":       "1:1 (0): no match found",
+	"dcddccdd":     "",
+}
+
+// TestAndNot runs every input in cases through the generated parser and
+// compares the resulting error text (empty string = successful parse).
+func TestAndNot(t *testing.T) {
+	for tc, exp := range cases {
+		_, err := Parse("", []byte(tc))
+		var got string
+		if err != nil {
+			got = err.Error()
+		}
+		if got != exp {
+			t.Errorf("%q: want %v, got %v", tc, exp, got)
+		}
+	}
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.go b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.go
new file mode 100644
index 0000000000..2a1f1b45b5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.go
@@ -0,0 +1,971 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// main parses the fixed input "foo" with the generated grammar and
+// prints either the resulting value or the parse error.
+func main() {
+	ast, err := Parse("STDIN", []byte("foo"))
+	if err != nil {
+		fmt.Printf("error: %s\n", err)
+		return
+	}
+	fmt.Printf("%+v\n", ast)
+}
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "TableRef",
+ pos: position{line: 14, col: 1, offset: 174},
+ expr: &actionExpr{
+ pos: position{line: 14, col: 13, offset: 186},
+ run: (*parser).callonTableRef1,
+ expr: &seqExpr{
+ pos: position{line: 14, col: 13, offset: 186},
+ exprs: []interface{}{
+ &labeledExpr{
+ pos: position{line: 14, col: 13, offset: 186},
+ label: "database",
+ expr: &zeroOrOneExpr{
+ pos: position{line: 14, col: 22, offset: 195},
+ expr: &seqExpr{
+ pos: position{line: 14, col: 23, offset: 196},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 14, col: 23, offset: 196},
+ name: "ID",
+ },
+ &litMatcher{
+ pos: position{line: 14, col: 26, offset: 199},
+ val: ".",
+ ignoreCase: false,
+ },
+ },
+ },
+ },
+ },
+ &labeledExpr{
+ pos: position{line: 14, col: 32, offset: 205},
+ label: "table",
+ expr: &ruleRefExpr{
+ pos: position{line: 14, col: 38, offset: 211},
+ name: "ID",
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ name: "ID",
+ pos: position{line: 15, col: 1, offset: 271},
+ expr: &actionExpr{
+ pos: position{line: 15, col: 7, offset: 277},
+ run: (*parser).callonID1,
+ expr: &oneOrMoreExpr{
+ pos: position{line: 15, col: 7, offset: 277},
+ expr: &charClassMatcher{
+ pos: position{line: 15, col: 7, offset: 277},
+ val: "[a-z]",
+ ranges: []rune{'a', 'z'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ },
+ },
+}
+
+// onTableRef1 is the action for the TableRef rule. database holds the
+// optional (ID '.') match (nil when absent) and table holds the ID.
+func (c *current) onTableRef1(database, table interface{}) (interface{}, error) {
+	return fmt.Sprintf("%v.%s", database, table), nil
+}
+
+// callonTableRef1 adapts onTableRef1 to the parser: it pulls the
+// labeled values from the top of the variable stack and invokes the
+// grammar action.
+func (p *parser) callonTableRef1() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onTableRef1(stack["database"], stack["table"])
+}
+
+// onID1 is the action for the ID rule: it returns the raw matched text
+// (a []byte).
+func (c *current) onID1() (interface{}, error) {
+	return c.text, nil
+}
+
+// callonID1 adapts onID1 to the parser; ID has no labels, so the stack
+// is unused.
+func (p *parser) callonID1() (interface{}, error) {
+	stack := p.vstack[len(p.vstack)-1]
+	_ = stack
+	return p.cur.onID1()
+}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+func (p position) String() string {
+ return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune
+ w int
+}
+
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
+// the AST types...
+
+type grammar struct {
+ pos position
+ rules []*rule
+}
+
+type rule struct {
+ pos position
+ name string
+ displayName string
+ expr interface{}
+}
+
+type choiceExpr struct {
+ pos position
+ alternatives []interface{}
+}
+
+type actionExpr struct {
+ pos position
+ expr interface{}
+ run func(*parser) (interface{}, error)
+}
+
+type seqExpr struct {
+ pos position
+ exprs []interface{}
+}
+
+type labeledExpr struct {
+ pos position
+ label string
+ expr interface{}
+}
+
+type expr struct {
+ pos position
+ expr interface{}
+}
+
+type andExpr expr
+type notExpr expr
+type zeroOrOneExpr expr
+type zeroOrMoreExpr expr
+type oneOrMoreExpr expr
+
+type ruleRefExpr struct {
+ pos position
+ name string
+}
+
+type andCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type notCodeExpr struct {
+ pos position
+ run func(*parser) (bool, error)
+}
+
+type litMatcher struct {
+ pos position
+ val string
+ ignoreCase bool
+}
+
+type charClassMatcher struct {
+ pos position
+ val string
+ chars []rune
+ ranges []rune
+ classes []*unicode.RangeTable
+ ignoreCase bool
+ inverted bool
+}
+
+type anyMatcher position
+
+// errList cumulates the errors found by the parser.
+type errList []error
+
+func (e *errList) add(err error) {
+ *e = append(*e, err)
+}
+
+func (e errList) err() error {
+ if len(e) == 0 {
+ return nil
+ }
+ e.dedupe()
+ return e
+}
+
+func (e *errList) dedupe() {
+ var cleaned []error
+ set := make(map[string]bool)
+ for _, err := range *e {
+ if msg := err.Error(); !set[msg] {
+ set[msg] = true
+ cleaned = append(cleaned, err)
+ }
+ }
+ *e = cleaned
+}
+
+func (e errList) Error() string {
+ switch len(e) {
+ case 0:
+ return ""
+ case 1:
+ return e[0].Error()
+ default:
+ var buf bytes.Buffer
+
+ for i, err := range e {
+ if i > 0 {
+ buf.WriteRune('\n')
+ }
+ buf.WriteString(err.Error())
+ }
+ return buf.String()
+ }
+}
+
+// parserError wraps an error with a prefix indicating the rule in which
+// the error occurred. The original error is stored in the Inner field.
+type parserError struct {
+ Inner error
+ pos position
+ prefix string
+}
+
+// Error returns the error message.
+func (p *parserError) Error() string {
+ return p.prefix + ": " + p.Inner.Error()
+}
+
+// newParser creates a parser with the specified input source and options.
+func newParser(filename string, b []byte, opts ...Option) *parser {
+ p := &parser{
+ filename: filename,
+ errs: new(errList),
+ data: b,
+ pt: savepoint{position: position{line: 1}},
+ recover: true,
+ }
+ p.setOptions(opts)
+ return p
+}
+
+// setOptions applies the options to the parser.
+func (p *parser) setOptions(opts []Option) {
+ for _, opt := range opts {
+ opt(p)
+ }
+}
+
+type resultTuple struct {
+ v interface{}
+ b bool
+ end savepoint
+}
+
+type parser struct {
+ filename string
+ pt savepoint
+ cur current
+
+ data []byte
+ errs *errList
+
+ recover bool
+ debug bool
+ depth int
+
+ memoize bool
+ // memoization table for the packrat algorithm:
+ // map[offset in source] map[expression or rule] {value, match}
+ memo map[int]map[interface{}]resultTuple
+
+ // rules table, maps the rule identifier to the rule node
+ rules map[string]*rule
+ // variables stack, map of label to value
+ vstack []map[string]interface{}
+ // rule stack, allows identification of the current rule in errors
+ rstack []*rule
+
+ // stats
+ exprCnt int
+}
+
+// pushV pushes a fresh (empty) variable set onto the vstack. To reduce
+// allocations it grows into spare capacity when possible and reuses a
+// previously-allocated map if the slot's old map is already empty.
+func (p *parser) pushV() {
+	if cap(p.vstack) == len(p.vstack) {
+		// create new empty slot in the stack
+		p.vstack = append(p.vstack, nil)
+	} else {
+		// slice to 1 more
+		p.vstack = p.vstack[:len(p.vstack)+1]
+	}
+
+	// get the last args set
+	m := p.vstack[len(p.vstack)-1]
+	if m != nil && len(m) == 0 {
+		// empty map, all good: reuse it as-is
+		return
+	}
+
+	m = make(map[string]interface{})
+	p.vstack[len(p.vstack)-1] = m
+}
+
+// popV pops a variable set from the vstack. A non-empty map is dropped
+// (so the GC can reclaim it); an empty one is left in spare capacity
+// for pushV to reuse.
+func (p *parser) popV() {
+	// if the map is not empty, clear it
+	m := p.vstack[len(p.vstack)-1]
+	if len(m) > 0 {
+		// GC that map
+		p.vstack[len(p.vstack)-1] = nil
+	}
+	p.vstack = p.vstack[:len(p.vstack)-1]
+}
+
+func (p *parser) print(prefix, s string) string {
+ if !p.debug {
+ return s
+ }
+
+ fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
+ prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
+ return s
+}
+
+func (p *parser) in(s string) string {
+ p.depth++
+ return p.print(strings.Repeat(" ", p.depth)+">", s)
+}
+
+func (p *parser) out(s string) string {
+ p.depth--
+ return p.print(strings.Repeat(" ", p.depth)+"<", s)
+}
+
+// addErr records err at the parser's current input position.
+func (p *parser) addErr(err error) {
+	p.addErrAt(err, p.pt.position)
+}
+
+// addErrAt records err at pos, prefixed with "file:line:col (offset)"
+// and, when a rule is currently being parsed, the rule's display name.
+func (p *parser) addErrAt(err error, pos position) {
+	var buf bytes.Buffer
+	if p.filename != "" {
+		buf.WriteString(p.filename)
+	}
+	if buf.Len() > 0 {
+		buf.WriteString(":")
+	}
+	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
+	if len(p.rstack) > 0 {
+		if buf.Len() > 0 {
+			buf.WriteString(": ")
+		}
+		// innermost rule on the rule stack gives the error context
+		rule := p.rstack[len(p.rstack)-1]
+		if rule.displayName != "" {
+			buf.WriteString("rule " + rule.displayName)
+		} else {
+			buf.WriteString("rule " + rule.name)
+		}
+	}
+	pe := &parserError{Inner: err, prefix: buf.String()}
+	p.errs.add(pe)
+}
+
+// read advances the parser to the next rune, maintaining line/col/offset
+// tracking. At end of input utf8.DecodeRune yields RuneError with width
+// 0, which the parser uses as its EOF sentinel; RuneError with a
+// positive width means genuinely invalid UTF-8 and is recorded as an
+// encoding error.
+func (p *parser) read() {
+	p.pt.offset += p.pt.w
+	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
+	p.pt.rn = rn
+	p.pt.w = n
+	p.pt.col++
+	if rn == '\n' {
+		p.pt.line++
+		p.pt.col = 0
+	}
+
+	if rn == utf8.RuneError {
+		if n > 0 {
+			p.addErr(errInvalidEncoding)
+		}
+	}
+}
+
+// restore parser position to the savepoint pt.
+func (p *parser) restore(pt savepoint) {
+ if p.debug {
+ defer p.out(p.in("restore"))
+ }
+ if pt.offset == p.pt.offset {
+ return
+ }
+ p.pt = pt
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+ return p.data[start.position.offset:p.pt.position.offset]
+}
+
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+ if len(p.memo) == 0 {
+ return resultTuple{}, false
+ }
+ m := p.memo[p.pt.offset]
+ if len(m) == 0 {
+ return resultTuple{}, false
+ }
+ res, ok := m[node]
+ return res, ok
+}
+
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+ if p.memo == nil {
+ p.memo = make(map[int]map[interface{}]resultTuple)
+ }
+ m := p.memo[pt.offset]
+ if m == nil {
+ m = make(map[interface{}]resultTuple)
+ p.memo[pt.offset] = m
+ }
+ m[node] = tuple
+}
+
+func (p *parser) buildRulesTable(g *grammar) {
+ p.rules = make(map[string]*rule, len(g.rules))
+ for _, r := range g.rules {
+ p.rules[r.name] = r
+ }
+}
+
+// parse runs the grammar against the parser's input, starting with
+// g.rules[0]. With recover enabled (the default), any panic raised
+// while parsing — including panics from grammar action code — is
+// converted into a parse error instead of crashing the caller.
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+	if len(g.rules) == 0 {
+		p.addErr(errNoRule)
+		return nil, p.errs.err()
+	}
+
+	// TODO : not super critical but this could be generated
+	p.buildRulesTable(g)
+
+	if p.recover {
+		// panic can be used in action code to stop parsing immediately
+		// and return the panic as an error.
+		defer func() {
+			if e := recover(); e != nil {
+				if p.debug {
+					defer p.out(p.in("panic handler"))
+				}
+				val = nil
+				switch e := e.(type) {
+				case error:
+					p.addErr(e)
+				default:
+					p.addErr(fmt.Errorf("%v", e))
+				}
+				err = p.errs.err()
+			}
+		}()
+	}
+
+	// start rule is rule [0]
+	p.read() // advance to first rune
+	val, ok := p.parseRule(g.rules[0])
+	if !ok {
+		if len(*p.errs) == 0 {
+			// make sure this doesn't go out silently
+			p.addErr(errNoMatch)
+		}
+		return nil, p.errs.err()
+	}
+	// a match may still carry non-fatal errors (e.g. from actions)
+	return val, p.errs.err()
+}
+
+func (p *parser) parseRule(rule *rule) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRule " + rule.name))
+ }
+
+ if p.memoize {
+ res, ok := p.getMemoized(rule)
+ if ok {
+ p.restore(res.end)
+ return res.v, res.b
+ }
+ }
+
+ start := p.pt
+ p.rstack = append(p.rstack, rule)
+ p.pushV()
+ val, ok := p.parseExpr(rule.expr)
+ p.popV()
+ p.rstack = p.rstack[:len(p.rstack)-1]
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+
+ if p.memoize {
+ p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
+ }
+ return val, ok
+}
+
+// parseExpr dispatches on the concrete expression type and evaluates it
+// at the current position. With memoization enabled, results are cached
+// per (input offset, expression node) so each expression is evaluated
+// at most once per position (packrat parsing).
+func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
+	var pt savepoint
+	var ok bool
+
+	if p.memoize {
+		res, ok := p.getMemoized(expr)
+		if ok {
+			// cache hit: jump straight to the recorded end position
+			p.restore(res.end)
+			return res.v, res.b
+		}
+		pt = p.pt
+	}
+
+	p.exprCnt++
+	var val interface{}
+	switch expr := expr.(type) {
+	case *actionExpr:
+		val, ok = p.parseActionExpr(expr)
+	case *andCodeExpr:
+		val, ok = p.parseAndCodeExpr(expr)
+	case *andExpr:
+		val, ok = p.parseAndExpr(expr)
+	case *anyMatcher:
+		val, ok = p.parseAnyMatcher(expr)
+	case *charClassMatcher:
+		val, ok = p.parseCharClassMatcher(expr)
+	case *choiceExpr:
+		val, ok = p.parseChoiceExpr(expr)
+	case *labeledExpr:
+		val, ok = p.parseLabeledExpr(expr)
+	case *litMatcher:
+		val, ok = p.parseLitMatcher(expr)
+	case *notCodeExpr:
+		val, ok = p.parseNotCodeExpr(expr)
+	case *notExpr:
+		val, ok = p.parseNotExpr(expr)
+	case *oneOrMoreExpr:
+		val, ok = p.parseOneOrMoreExpr(expr)
+	case *ruleRefExpr:
+		val, ok = p.parseRuleRefExpr(expr)
+	case *seqExpr:
+		val, ok = p.parseSeqExpr(expr)
+	case *zeroOrMoreExpr:
+		val, ok = p.parseZeroOrMoreExpr(expr)
+	case *zeroOrOneExpr:
+		val, ok = p.parseZeroOrOneExpr(expr)
+	default:
+		// unreachable for generator-produced grammars
+		panic(fmt.Sprintf("unknown expression type %T", expr))
+	}
+	if p.memoize {
+		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
+	}
+	return val, ok
+}
+
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseActionExpr"))
+ }
+
+ start := p.pt
+ val, ok := p.parseExpr(act.expr)
+ if ok {
+ p.cur.pos = start.position
+ p.cur.text = p.sliceFrom(start)
+ actVal, err := act.run(p)
+ if err != nil {
+ p.addErrAt(err, start.position)
+ }
+ val = actVal
+ }
+ if ok && p.debug {
+ p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+ }
+ return val, ok
+}
+
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndCodeExpr"))
+ }
+
+ ok, err := and.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, ok
+}
+
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAndExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(and.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, ok
+}
+
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseAnyMatcher"))
+ }
+
+ if p.pt.rn != utf8.RuneError {
+ start := p.pt
+ p.read()
+ return p.sliceFrom(start), true
+ }
+ return nil, false
+}
+
+// parseCharClassMatcher matches the current rune against a character
+// class: an explicit char list, rune ranges, then Unicode classes, with
+// optional case-folding and inversion. On success it consumes one rune
+// and returns the matched bytes.
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseCharClassMatcher"))
+	}
+
+	cur := p.pt.rn
+	// can't match EOF (RuneError doubles as the EOF sentinel)
+	if cur == utf8.RuneError {
+		return nil, false
+	}
+	start := p.pt
+	if chr.ignoreCase {
+		// NOTE(review): only the input rune is lowercased — assumes the
+		// generator stored chr.chars/ranges lowercased for ignoreCase.
+		cur = unicode.ToLower(cur)
+	}
+
+	// try to match in the list of available chars
+	for _, rn := range chr.chars {
+		if rn == cur {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of ranges
+	for i := 0; i < len(chr.ranges); i += 2 {
+		// ranges are stored as flat [lo, hi, lo, hi, ...] pairs
+		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of Unicode classes
+	for _, cl := range chr.classes {
+		if unicode.Is(cl, cur) {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// no set contained the rune: an inverted class matches it
+	if chr.inverted {
+		p.read()
+		return p.sliceFrom(start), true
+	}
+	return nil, false
+}
+
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseChoiceExpr"))
+ }
+
+ for _, alt := range ch.alternatives {
+ p.pushV()
+ val, ok := p.parseExpr(alt)
+ p.popV()
+ if ok {
+ return val, ok
+ }
+ }
+ return nil, false
+}
+
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseLabeledExpr"))
+ }
+
+ p.pushV()
+ val, ok := p.parseExpr(lab.expr)
+ p.popV()
+ if ok && lab.label != "" {
+ m := p.vstack[len(p.vstack)-1]
+ m[lab.label] = val
+ }
+ return val, ok
+}
+
+// parseLitMatcher matches the literal lit.val rune by rune, restoring
+// the starting position on any mismatch.
+// NOTE(review): for ignoreCase only the input rune is lowercased —
+// assumes the generator stored lit.val lowercased; verify in generator.
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseLitMatcher"))
+	}
+
+	start := p.pt
+	for _, want := range lit.val {
+		cur := p.pt.rn
+		if lit.ignoreCase {
+			cur = unicode.ToLower(cur)
+		}
+		if cur != want {
+			p.restore(start)
+			return nil, false
+		}
+		p.read()
+	}
+	return p.sliceFrom(start), true
+}
+
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotCodeExpr"))
+ }
+
+ ok, err := not.run(p)
+ if err != nil {
+ p.addErr(err)
+ }
+ return nil, !ok
+}
+
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseNotExpr"))
+ }
+
+ pt := p.pt
+ p.pushV()
+ _, ok := p.parseExpr(not.expr)
+ p.popV()
+ p.restore(pt)
+ return nil, !ok
+}
+
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseOneOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ if len(vals) == 0 {
+ // did not match once, no match
+ return nil, false
+ }
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseRuleRefExpr " + ref.name))
+ }
+
+ if ref.name == "" {
+ panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+ }
+
+ rule := p.rules[ref.name]
+ if rule == nil {
+ p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+ return nil, false
+ }
+ return p.parseRule(rule)
+}
+
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseSeqExpr"))
+ }
+
+ var vals []interface{}
+
+ pt := p.pt
+ for _, expr := range seq.exprs {
+ val, ok := p.parseExpr(expr)
+ if !ok {
+ p.restore(pt)
+ return nil, false
+ }
+ vals = append(vals, val)
+ }
+ return vals, true
+}
+
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrMoreExpr"))
+ }
+
+ var vals []interface{}
+
+ for {
+ p.pushV()
+ val, ok := p.parseExpr(expr.expr)
+ p.popV()
+ if !ok {
+ return vals, true
+ }
+ vals = append(vals, val)
+ }
+}
+
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+ if p.debug {
+ defer p.out(p.in("parseZeroOrOneExpr"))
+ }
+
+ p.pushV()
+ val, _ := p.parseExpr(expr.expr)
+ p.popV()
+ // whether it matched or not, consider it a match
+ return val, true
+}
+
+func rangeTable(class string) *unicode.RangeTable {
+ if rt, ok := unicode.Categories[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Properties[class]; ok {
+ return rt
+ }
+ if rt, ok := unicode.Scripts[class]; ok {
+ return rt
+ }
+
+ // cannot happen
+ panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.peg b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.peg
new file mode 100644
index 0000000000..803d08a32e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1.peg
@@ -0,0 +1,16 @@
+{
+package main
+
+func main() {
+ ast, err := Parse("STDIN", []byte("foo"))
+ if err != nil {
+ fmt.Printf("error: %s\n", err)
+ return
+ }
+ fmt.Printf("%+v\n", ast)
+}
+}
+
+TableRef <- database:(ID '.')? table:ID { return fmt.Sprintf("%v.%s", database, table), nil }
+ID <- [a-z]+ { return c.text, nil }
+
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1_test.go b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1_test.go
new file mode 100644
index 0000000000..f2310e8d2f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/issue_1/issue_1_test.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "reflect"
+ "testing"
+)
+
+// TestRunIssue1 parses "foo" and checks the TableRef action's output.
+// The optional "database." qualifier is absent, so its labeled value is
+// nil and fmt's %v verb renders it as "<nil>", giving "<nil>.foo".
+func TestRunIssue1(t *testing.T) {
+	got, err := Parse("", []byte("foo"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	want := "<nil>.foo"
+	gots := got.(string)
+	if gots != want {
+		t.Errorf("want %q, got %q", want, gots)
+	}
+}
+
+func TestIssue1(t *testing.T) {
+ methods := map[string][]string{
+ "onTableRef1": {"database", "table"},
+ "onID1": {},
+ }
+
+ typ := reflect.TypeOf(¤t{})
+ for nm, args := range methods {
+ meth, ok := typ.MethodByName(nm)
+ if !ok {
+ t.Errorf("want *current to have method %s", nm)
+ continue
+ }
+ if n := meth.Func.Type().NumIn(); n != len(args)+1 {
+ t.Errorf("%q: want %d arguments, got %d", nm, len(args)+1, n)
+ continue
+ }
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.go b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.go
new file mode 100644
index 0000000000..6e03f882e2
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.go
@@ -0,0 +1,1058 @@
+package main
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+func main() {
+}
+
+var g = &grammar{
+ rules: []*rule{
+ {
+ name: "File",
+ pos: position{line: 10, col: 1, offset: 102},
+ expr: &seqExpr{
+ pos: position{line: 10, col: 8, offset: 111},
+ exprs: []interface{}{
+ &choiceExpr{
+ pos: position{line: 10, col: 10, offset: 113},
+ alternatives: []interface{}{
+ &seqExpr{
+ pos: position{line: 10, col: 10, offset: 113},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 10, offset: 113},
+ name: "L",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 12, offset: 115},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 12, offset: 115},
+ name: "S",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 10, col: 17, offset: 120},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 17, offset: 120},
+ name: "L",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 19, offset: 122},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 19, offset: 122},
+ name: "N",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 10, col: 24, offset: 127},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 24, offset: 127},
+ name: "N",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 26, offset: 129},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 26, offset: 129},
+ name: "L",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 10, col: 31, offset: 134},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 31, offset: 134},
+ name: "N",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 33, offset: 136},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 33, offset: 136},
+ name: "S",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 10, col: 38, offset: 141},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 38, offset: 141},
+ name: "S",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 40, offset: 143},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 40, offset: 143},
+ name: "L",
+ },
+ },
+ },
+ },
+ &seqExpr{
+ pos: position{line: 10, col: 45, offset: 148},
+ exprs: []interface{}{
+ &ruleRefExpr{
+ pos: position{line: 10, col: 45, offset: 148},
+ name: "S",
+ },
+ &zeroOrOneExpr{
+ pos: position{line: 10, col: 47, offset: 150},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 47, offset: 150},
+ name: "N",
+ },
+ },
+ },
+ },
+ },
+ },
+ &zeroOrMoreExpr{
+ pos: position{line: 10, col: 52, offset: 155},
+ expr: &ruleRefExpr{
+ pos: position{line: 10, col: 52, offset: 155},
+ name: "File",
+ },
+ },
+ &ruleRefExpr{
+ pos: position{line: 10, col: 58, offset: 161},
+ name: "EOF",
+ },
+ },
+ },
+ },
+ {
+ name: "L",
+ pos: position{line: 11, col: 1, offset: 165},
+ expr: &oneOrMoreExpr{
+ pos: position{line: 11, col: 5, offset: 171},
+ expr: &charClassMatcher{
+ pos: position{line: 11, col: 5, offset: 171},
+ val: "[a-z]i",
+ ranges: []rune{'a', 'z'},
+ ignoreCase: true,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "N",
+ pos: position{line: 12, col: 1, offset: 179},
+ expr: &oneOrMoreExpr{
+ pos: position{line: 12, col: 5, offset: 185},
+ expr: &charClassMatcher{
+ pos: position{line: 12, col: 5, offset: 185},
+ val: "[0-9]",
+ ranges: []rune{'0', '9'},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "S",
+ pos: position{line: 13, col: 1, offset: 192},
+ expr: &oneOrMoreExpr{
+ pos: position{line: 13, col: 5, offset: 198},
+ expr: &charClassMatcher{
+ pos: position{line: 13, col: 5, offset: 198},
+ val: "[/+=]",
+ chars: []rune{'/', '+', '='},
+ ignoreCase: false,
+ inverted: false,
+ },
+ },
+ },
+ {
+ name: "EOF",
+ pos: position{line: 14, col: 1, offset: 205},
+ expr: ¬Expr{
+ pos: position{line: 14, col: 7, offset: 213},
+ expr: &anyMatcher{
+ line: 14, col: 8, offset: 214,
+ },
+ },
+ },
+ },
+}
+
+var (
+ // errNoRule is returned when the grammar to parse has no rule.
+ errNoRule = errors.New("grammar has no rule")
+
+ // errInvalidEncoding is returned when the source is not properly
+ // utf8-encoded.
+ errInvalidEncoding = errors.New("invalid encoding")
+
+ // errNoMatch is returned if no match could be found.
+ errNoMatch = errors.New("no match found")
+)
+
+// Option is a function that can set an option on the parser. It returns
+// the previous setting as an Option.
+type Option func(*parser) Option
+
+// Debug creates an Option to set the debug flag to b. When set to true,
+// debugging information is printed to stdout while parsing.
+//
+// The default is false.
+func Debug(b bool) Option {
+ return func(p *parser) Option {
+ old := p.debug
+ p.debug = b
+ return Debug(old)
+ }
+}
+
+// Memoize creates an Option to set the memoize flag to b. When set to true,
+// the parser will cache all results so each expression is evaluated only
+// once. This guarantees linear parsing time even for pathological cases,
+// at the expense of more memory and slower times for typical cases.
+//
+// The default is false.
+func Memoize(b bool) Option {
+ return func(p *parser) Option {
+ old := p.memoize
+ p.memoize = b
+ return Memoize(old)
+ }
+}
+
+// Recover creates an Option to set the recover flag to b. When set to
+// true, this causes the parser to recover from panics and convert it
+// to an error. Setting it to false can be useful while debugging to
+// access the full stack trace.
+//
+// The default is true.
+func Recover(b bool) Option {
+ return func(p *parser) Option {
+ old := p.recover
+ p.recover = b
+ return Recover(old)
+ }
+}
+
+// ParseFile parses the file identified by filename.
+func ParseFile(filename string, opts ...Option) (interface{}, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ParseReader(filename, f, opts...)
+}
+
+// ParseReader parses the data from r using filename as information in the
+// error messages.
+func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return Parse(filename, b, opts...)
+}
+
+// Parse parses the data from b using filename as information in the
+// error messages.
+func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
+ return newParser(filename, b, opts...).parse(g)
+}
+
+// position records a position in the text.
+type position struct {
+ line, col, offset int
+}
+
+func (p position) String() string {
+ return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
+}
+
+// savepoint stores all state required to go back to this point in the
+// parser.
+type savepoint struct {
+ position
+ rn rune
+ w int
+}
+
+type current struct {
+ pos position // start position of the match
+ text []byte // raw text of the match
+}
+
// the AST types...

// grammar is the root of the parser's AST; the first rule is the start rule.
type grammar struct {
	pos   position
	rules []*rule
}

// rule is a named grammar production. displayName, when non-empty, is used
// instead of name in error messages.
type rule struct {
	pos         position
	name        string
	displayName string
	expr        interface{}
}

// choiceExpr is an ordered choice: alternatives are tried in order and the
// first match wins.
type choiceExpr struct {
	pos          position
	alternatives []interface{}
}

// actionExpr runs the user code in run when expr matches.
type actionExpr struct {
	pos  position
	expr interface{}
	run  func(*parser) (interface{}, error)
}

// seqExpr matches all of exprs in sequence, or fails as a whole.
type seqExpr struct {
	pos   position
	exprs []interface{}
}

// labeledExpr binds the value matched by expr to label, making it
// available to action and predicate code.
type labeledExpr struct {
	pos   position
	label string
	expr  interface{}
}

// expr is the common shape of the single-operand expressions aliased below.
type expr struct {
	pos  position
	expr interface{}
}

type andExpr expr        // &e positive lookahead (consumes no input)
type notExpr expr        // !e negative lookahead (consumes no input)
type zeroOrOneExpr expr  // e?
type zeroOrMoreExpr expr // e*
type oneOrMoreExpr expr  // e+

// ruleRefExpr references another rule by name.
type ruleRefExpr struct {
	pos  position
	name string
}

// andCodeExpr is an &{...} code predicate: matches when run returns true.
type andCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// notCodeExpr is a !{...} code predicate: matches when run returns false.
type notCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// litMatcher matches the literal string val, optionally case-insensitively.
type litMatcher struct {
	pos        position
	val        string
	ignoreCase bool
}

// charClassMatcher matches one rune against individual chars, inclusive
// range pairs stored flat in ranges, and Unicode classes. inverted flips
// the result.
type charClassMatcher struct {
	pos        position
	val        string
	chars      []rune
	ranges     []rune
	classes    []*unicode.RangeTable
	ignoreCase bool
	inverted   bool
}

// anyMatcher matches any single rune (the "." expression).
type anyMatcher position
+
// errList cumulates the errors found by the parser.
type errList []error

// add appends err to the list.
func (e *errList) add(err error) {
	*e = append(*e, err)
}

// err returns the list as an error value: nil when empty, otherwise the
// deduplicated list itself.
func (e errList) err() error {
	if len(e) == 0 {
		return nil
	}
	e.dedupe()
	return e
}

// dedupe drops errors whose message duplicates an earlier one, keeping the
// first occurrence of each message in order.
func (e *errList) dedupe() {
	seen := make(map[string]bool, len(*e))
	var kept []error
	for _, err := range *e {
		msg := err.Error()
		if seen[msg] {
			continue
		}
		seen[msg] = true
		kept = append(kept, err)
	}
	*e = kept
}

// Error implements the error interface, joining the individual messages
// with newlines.
func (e errList) Error() string {
	if len(e) == 0 {
		return ""
	}
	if len(e) == 1 {
		return e[0].Error()
	}
	msgs := make([]string, len(e))
	for i, err := range e {
		msgs[i] = err.Error()
	}
	return strings.Join(msgs, "\n")
}
+
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
	Inner  error
	pos    position // position at which the error was recorded
	prefix string   // "filename:line:col (offset): rule name" context
}

// Error returns the error message: the context prefix followed by the
// inner error's message.
func (p *parserError) Error() string {
	return p.prefix + ": " + p.Inner.Error()
}
+
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
	p := &parser{
		filename: filename,
		errs:     new(errList),
		data:     b,
		pt:       savepoint{position: position{line: 1}}, // before the first rune is read
		recover:  true,                                   // panics become errors unless Recover(false) is passed
	}
	p.setOptions(opts)
	return p
}

// setOptions applies the options to the parser.
func (p *parser) setOptions(opts []Option) {
	for _, opt := range opts {
		opt(p)
	}
}
+
// resultTuple is a memoized parse outcome: the produced value, whether the
// expression matched, and the savepoint to restore to on a cache hit.
type resultTuple struct {
	v   interface{}
	b   bool
	end savepoint
}

// parser is the state machine for the pigeon-generated parser.
type parser struct {
	filename string    // used only as a prefix in error messages
	pt       savepoint // current position (and current rune) in data
	cur      current   // exposed to action/predicate code as "c"

	data []byte
	errs *errList

	recover bool // convert panics raised while parsing into errors
	debug   bool // print a trace of the parse to stdout
	depth   int  // indentation level for the debug trace

	memoize bool
	// memoization table for the packrat algorithm:
	// map[offset in source] map[expression or rule] {value, match}
	memo map[int]map[interface{}]resultTuple

	// rules table, maps the rule identifier to the rule node
	rules map[string]*rule
	// variables stack, map of label to value
	vstack []map[string]interface{}
	// rule stack, allows identification of the current rule in errors
	rstack []*rule

	// stats
	exprCnt int
}
+
// push a variable set on the vstack. Slots (and their maps) are reused
// across pushes to avoid reallocating on every expression.
func (p *parser) pushV() {
	if cap(p.vstack) == len(p.vstack) {
		// create new empty slot in the stack
		p.vstack = append(p.vstack, nil)
	} else {
		// slice to 1 more
		p.vstack = p.vstack[:len(p.vstack)+1]
	}

	// get the last args set
	m := p.vstack[len(p.vstack)-1]
	if m != nil && len(m) == 0 {
		// empty map, all good
		return
	}

	// slot was nil, or still holds labels from a previous use: start fresh
	m = make(map[string]interface{})
	p.vstack[len(p.vstack)-1] = m
}

// pop a variable set from the vstack.
func (p *parser) popV() {
	// if the map is not empty, clear it
	m := p.vstack[len(p.vstack)-1]
	if len(m) > 0 {
		// GC that map
		p.vstack[len(p.vstack)-1] = nil
	}
	p.vstack = p.vstack[:len(p.vstack)-1]
}
+
// print emits one line of the debug trace (no-op unless p.debug) and
// returns s unchanged so it can wrap in/out calls.
func (p *parser) print(prefix, s string) string {
	if !p.debug {
		return s
	}

	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
	return s
}

// in increases the trace depth and prints an entry (">") line.
func (p *parser) in(s string) string {
	p.depth++
	return p.print(strings.Repeat(" ", p.depth)+">", s)
}

// out decreases the trace depth and prints an exit ("<") line.
func (p *parser) out(s string) string {
	p.depth--
	return p.print(strings.Repeat(" ", p.depth)+"<", s)
}
+
// addErr records err at the parser's current position.
func (p *parser) addErr(err error) {
	p.addErrAt(err, p.pt.position)
}

// addErrAt records err at pos, building a "filename:line:col (offset):
// rule name" prefix from the innermost rule on rstack, and appends the
// wrapped error to p.errs.
func (p *parser) addErrAt(err error, pos position) {
	var buf bytes.Buffer
	if p.filename != "" {
		buf.WriteString(p.filename)
	}
	if buf.Len() > 0 {
		buf.WriteString(":")
	}
	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
	if len(p.rstack) > 0 {
		if buf.Len() > 0 {
			buf.WriteString(": ")
		}
		// prefer the rule's display name when the grammar provided one
		rule := p.rstack[len(p.rstack)-1]
		if rule.displayName != "" {
			buf.WriteString("rule " + rule.displayName)
		} else {
			buf.WriteString("rule " + rule.name)
		}
	}
	pe := &parserError{Inner: err, prefix: buf.String()}
	p.errs.add(pe)
}
+
// read advances the parser to the next rune, updating line/col tracking.
func (p *parser) read() {
	p.pt.offset += p.pt.w
	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
	p.pt.rn = rn
	p.pt.w = n
	p.pt.col++
	if rn == '\n' {
		p.pt.line++
		p.pt.col = 0
	}

	if rn == utf8.RuneError {
		if n > 0 {
			// n > 0 means an invalid UTF-8 sequence; n == 0 is plain EOF,
			// which is not an error.
			p.addErr(errInvalidEncoding)
		}
	}
}
+
// restore parser position to the savepoint pt (no-op if already there).
func (p *parser) restore(pt savepoint) {
	if p.debug {
		defer p.out(p.in("restore"))
	}
	if pt.offset == p.pt.offset {
		return
	}
	p.pt = pt
}

// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
	return p.data[start.position.offset:p.pt.position.offset]
}
+
// getMemoized looks up a cached result for node at the current offset.
func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
	if len(p.memo) == 0 {
		return resultTuple{}, false
	}
	m := p.memo[p.pt.offset]
	if len(m) == 0 {
		return resultTuple{}, false
	}
	res, ok := m[node]
	return res, ok
}

// setMemoized caches tuple for node at the savepoint's offset, lazily
// allocating the two map levels.
func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
	if p.memo == nil {
		p.memo = make(map[int]map[interface{}]resultTuple)
	}
	m := p.memo[pt.offset]
	if m == nil {
		m = make(map[interface{}]resultTuple)
		p.memo[pt.offset] = m
	}
	m[node] = tuple
}

// buildRulesTable indexes the grammar's rules by name for ruleRefExpr lookups.
func (p *parser) buildRulesTable(g *grammar) {
	p.rules = make(map[string]*rule, len(g.rules))
	for _, r := range g.rules {
		p.rules[r.name] = r
	}
}
+
// parse runs the parser over grammar g and returns the value produced by
// the start rule (g.rules[0]) together with any accumulated errors.
func (p *parser) parse(g *grammar) (val interface{}, err error) {
	if len(g.rules) == 0 {
		p.addErr(errNoRule)
		return nil, p.errs.err()
	}

	// TODO : not super critical but this could be generated
	p.buildRulesTable(g)

	if p.recover {
		// panic can be used in action code to stop parsing immediately
		// and return the panic as an error.
		defer func() {
			if e := recover(); e != nil {
				if p.debug {
					defer p.out(p.in("panic handler"))
				}
				val = nil
				switch e := e.(type) {
				case error:
					p.addErr(e)
				default:
					p.addErr(fmt.Errorf("%v", e))
				}
				err = p.errs.err()
			}
		}()
	}

	// start rule is rule [0]
	p.read() // advance to first rune
	val, ok := p.parseRule(g.rules[0])
	if !ok {
		if len(*p.errs) == 0 {
			// make sure this doesn't go out silently
			p.addErr(errNoMatch)
		}
		return nil, p.errs.err()
	}
	return val, p.errs.err()
}
+
// parseRule parses a single rule, consulting and updating the memoization
// table when enabled, and pushing the rule on rstack so errors can name it.
func (p *parser) parseRule(rule *rule) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRule " + rule.name))
	}

	if p.memoize {
		res, ok := p.getMemoized(rule)
		if ok {
			// cache hit: jump straight to where this rule previously ended
			p.restore(res.end)
			return res.v, res.b
		}
	}

	start := p.pt
	p.rstack = append(p.rstack, rule)
	p.pushV()
	val, ok := p.parseExpr(rule.expr)
	p.popV()
	p.rstack = p.rstack[:len(p.rstack)-1]
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}

	if p.memoize {
		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
// parseExpr dispatches on the concrete expression type; when memoization
// is enabled, results are cached keyed by (start offset, expression node).
func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
	var pt savepoint
	var ok bool

	if p.memoize {
		res, ok := p.getMemoized(expr)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
		pt = p.pt
	}

	p.exprCnt++
	var val interface{}
	switch expr := expr.(type) {
	case *actionExpr:
		val, ok = p.parseActionExpr(expr)
	case *andCodeExpr:
		val, ok = p.parseAndCodeExpr(expr)
	case *andExpr:
		val, ok = p.parseAndExpr(expr)
	case *anyMatcher:
		val, ok = p.parseAnyMatcher(expr)
	case *charClassMatcher:
		val, ok = p.parseCharClassMatcher(expr)
	case *choiceExpr:
		val, ok = p.parseChoiceExpr(expr)
	case *labeledExpr:
		val, ok = p.parseLabeledExpr(expr)
	case *litMatcher:
		val, ok = p.parseLitMatcher(expr)
	case *notCodeExpr:
		val, ok = p.parseNotCodeExpr(expr)
	case *notExpr:
		val, ok = p.parseNotExpr(expr)
	case *oneOrMoreExpr:
		val, ok = p.parseOneOrMoreExpr(expr)
	case *ruleRefExpr:
		val, ok = p.parseRuleRefExpr(expr)
	case *seqExpr:
		val, ok = p.parseSeqExpr(expr)
	case *zeroOrMoreExpr:
		val, ok = p.parseZeroOrMoreExpr(expr)
	case *zeroOrOneExpr:
		val, ok = p.parseZeroOrOneExpr(expr)
	default:
		// unreachable for generator-produced grammars
		panic(fmt.Sprintf("unknown expression type %T", expr))
	}
	if p.memoize {
		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
// parseActionExpr parses the wrapped expression and, on success, runs the
// user action with p.cur set to the matched position and text. An error
// returned by the action is recorded but does not fail the match.
func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseActionExpr"))
	}

	start := p.pt
	val, ok := p.parseExpr(act.expr)
	if ok {
		p.cur.pos = start.position
		p.cur.text = p.sliceFrom(start)
		actVal, err := act.run(p)
		if err != nil {
			p.addErrAt(err, start.position)
		}
		// the action's return value replaces the expression's value
		val = actVal
	}
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}
	return val, ok
}
+
// parseAndCodeExpr evaluates an &{...} code predicate: it consumes no
// input and matches when the user code returns true.
func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndCodeExpr"))
	}

	ok, err := and.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, ok
}

// parseAndExpr evaluates an &-lookahead: the wrapped expression is parsed
// and the position restored regardless of the outcome.
func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAndExpr"))
	}

	pt := p.pt
	p.pushV()
	_, ok := p.parseExpr(and.expr)
	p.popV()
	p.restore(pt)
	return nil, ok
}
+
// parseAnyMatcher matches any single rune; it fails only when the current
// rune is utf8.RuneError (EOF, or an invalid byte sequence already
// reported by read).
func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseAnyMatcher"))
	}

	if p.pt.rn != utf8.RuneError {
		start := p.pt
		p.read()
		return p.sliceFrom(start), true
	}
	return nil, false
}
+
// parseCharClassMatcher matches the current rune against the class's
// individual chars, range pairs, and Unicode classes. For an inverted
// class the logic is flipped: any hit fails, and a complete miss consumes
// the rune and succeeds.
func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseCharClassMatcher"))
	}

	cur := p.pt.rn
	// can't match EOF
	if cur == utf8.RuneError {
		return nil, false
	}
	start := p.pt
	if chr.ignoreCase {
		// NOTE(review): assumes chars/ranges were emitted lowercased by the
		// generator when ignoreCase is set.
		cur = unicode.ToLower(cur)
	}

	// try to match in the list of available chars
	for _, rn := range chr.chars {
		if rn == cur {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of ranges (stored flat as lo,hi pairs)
	for i := 0; i < len(chr.ranges); i += 2 {
		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// try to match in the list of Unicode classes
	for _, cl := range chr.classes {
		if unicode.Is(cl, cur) {
			if chr.inverted {
				return nil, false
			}
			p.read()
			return p.sliceFrom(start), true
		}
	}

	// nothing matched: success for an inverted class, failure otherwise
	if chr.inverted {
		p.read()
		return p.sliceFrom(start), true
	}
	return nil, false
}
+
// parseChoiceExpr tries each alternative in order and returns the first
// match (ordered choice: no backtracking into later alternatives after a
// match).
func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseChoiceExpr"))
	}

	for _, alt := range ch.alternatives {
		p.pushV()
		val, ok := p.parseExpr(alt)
		p.popV()
		if ok {
			return val, ok
		}
	}
	return nil, false
}

// parseLabeledExpr parses the wrapped expression and, on a match, stores
// its value under the label in the top variable set for action/predicate
// code to read.
func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLabeledExpr"))
	}

	p.pushV()
	val, ok := p.parseExpr(lab.expr)
	p.popV()
	if ok && lab.label != "" {
		m := p.vstack[len(p.vstack)-1]
		m[lab.label] = val
	}
	return val, ok
}
+
// parseLitMatcher matches the literal lit.val rune by rune, restoring the
// start position on mismatch.
// NOTE(review): for ignoreCase only the input rune is lowered, so lit.val
// is assumed to be pre-lowercased by the generator.
func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseLitMatcher"))
	}

	start := p.pt
	for _, want := range lit.val {
		cur := p.pt.rn
		if lit.ignoreCase {
			cur = unicode.ToLower(cur)
		}
		if cur != want {
			p.restore(start)
			return nil, false
		}
		p.read()
	}
	return p.sliceFrom(start), true
}

// parseNotCodeExpr evaluates a !{...} code predicate: it consumes no input
// and matches when the user code returns false.
func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotCodeExpr"))
	}

	ok, err := not.run(p)
	if err != nil {
		p.addErr(err)
	}
	return nil, !ok
}

// parseNotExpr evaluates a !-lookahead: the wrapped expression is parsed,
// the position restored, and the match result inverted.
func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseNotExpr"))
	}

	pt := p.pt
	p.pushV()
	_, ok := p.parseExpr(not.expr)
	p.popV()
	p.restore(pt)
	return nil, !ok
}
+
// parseOneOrMoreExpr matches the wrapped expression repeatedly (e+); it
// fails only when the first repetition does not match.
func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseOneOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			if len(vals) == 0 {
				// did not match once, no match
				return nil, false
			}
			return vals, true
		}
		vals = append(vals, val)
	}
}

// parseRuleRefExpr resolves a rule by name and parses it; an unknown name
// records an error and fails the match.
func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRuleRefExpr " + ref.name))
	}

	if ref.name == "" {
		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
	}

	rule := p.rules[ref.name]
	if rule == nil {
		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
		return nil, false
	}
	return p.parseRule(rule)
}
+
// parseSeqExpr matches all sub-expressions in order; any failure restores
// the starting position and fails the whole sequence.
func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseSeqExpr"))
	}

	var vals []interface{}

	pt := p.pt
	for _, expr := range seq.exprs {
		val, ok := p.parseExpr(expr)
		if !ok {
			p.restore(pt)
			return nil, false
		}
		vals = append(vals, val)
	}
	return vals, true
}

// parseZeroOrMoreExpr matches the wrapped expression as many times as
// possible (e*); it always succeeds.
func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrMoreExpr"))
	}

	var vals []interface{}

	for {
		p.pushV()
		val, ok := p.parseExpr(expr.expr)
		p.popV()
		if !ok {
			return vals, true
		}
		vals = append(vals, val)
	}
}

// parseZeroOrOneExpr matches the wrapped expression at most once (e?).
func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseZeroOrOneExpr"))
	}

	p.pushV()
	val, _ := p.parseExpr(expr.expr)
	p.popV()
	// whether it matched or not, consider it a match
	return val, true
}
+
// rangeTable resolves a Unicode class name against the general categories,
// properties, and scripts, in that order. It panics on an unknown name;
// the generator only emits names validated at generation time.
func rangeTable(class string) *unicode.RangeTable {
	for _, tables := range []map[string]*unicode.RangeTable{
		unicode.Categories, unicode.Properties, unicode.Scripts,
	} {
		if rt, ok := tables[class]; ok {
			return rt
		}
	}

	// cannot happen
	panic(fmt.Sprintf("invalid Unicode class: %s", class))
}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.peg b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.peg
new file mode 100644
index 0000000000..22d773243a
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear.peg
@@ -0,0 +1,14 @@
{
    package main

    func main() {
    }
}

// any base64 input is good. force some backtracking.
// The File alternatives pair letter (L), digit (N) and symbol (S) runs in
// overlapping combinations so the parser must try and abandon prefixes,
// which is what the linear-time (memoization) test exercises.

File ← ( L S? / L N? / N L? / N S? / S L? / S N? ) File* EOF
L ← [a-z]i+
N ← [0-9]+
S ← [/+=]+
EOF ← !.
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear_test.go b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear_test.go
new file mode 100644
index 0000000000..f50ba3089e
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/linear/linear_test.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/base64"
+ "io"
+ "testing"
+ "time"
+)
+
+func TestLinearTime(t *testing.T) {
+ var buf bytes.Buffer
+
+ sizes := []int64{
+ 1 << 10, // 1Kb
+ 10 << 10, // 10Kb
+ 100 << 10, // 100Kb
+ // TODO : 1Mb overflows the stack
+ //1 << 20,
+ }
+ for _, sz := range sizes {
+ r := io.LimitReader(rand.Reader, sz)
+ enc := base64.NewEncoder(base64.StdEncoding, &buf)
+ _, err := io.Copy(enc, r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ enc.Close()
+
+ start := time.Now()
+ if _, err := Parse("", buf.Bytes(), Memoize(true)); err != nil {
+ t.Fatal(err)
+ }
+ t.Log(time.Now().Sub(start))
+ }
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.go b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.go
new file mode 100644
index 0000000000..e1b16439db
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.go
@@ -0,0 +1,1106 @@
+package predicates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
// g is the generated grammar AST for the predicates test: rule A exercises
// !{...} and &{...} code predicates after labeled literals, rule B nests
// labeled expressions each guarded by an &{...} predicate, and rule C pairs
// an action with an &-lookahead and a reference to rule "hij" (not defined
// in this grammar).
var g = &grammar{
	rules: []*rule{
		{
			name: "A",
			pos:  position{line: 5, col: 1, offset: 24},
			expr: &choiceExpr{
				pos: position{line: 5, col: 5, offset: 30},
				alternatives: []interface{}{
					&seqExpr{
						pos: position{line: 5, col: 5, offset: 30},
						exprs: []interface{}{
							&labeledExpr{
								pos:   position{line: 5, col: 5, offset: 30},
								label: "a",
								expr: &litMatcher{
									pos:        position{line: 5, col: 7, offset: 32},
									val:        "a",
									ignoreCase: false,
								},
							},
							&notCodeExpr{
								pos: position{line: 5, col: 11, offset: 36},
								run: (*parser).callonA5,
							},
						},
					},
					&seqExpr{
						pos: position{line: 10, col: 3, offset: 98},
						exprs: []interface{}{
							&labeledExpr{
								pos:   position{line: 10, col: 3, offset: 98},
								label: "b",
								expr: &litMatcher{
									pos:        position{line: 10, col: 5, offset: 100},
									val:        "b",
									ignoreCase: false,
								},
							},
							&notCodeExpr{
								pos: position{line: 10, col: 9, offset: 104},
								run: (*parser).callonA9,
							},
						},
					},
					&seqExpr{
						pos: position{line: 15, col: 3, offset: 165},
						exprs: []interface{}{
							&labeledExpr{
								pos:   position{line: 15, col: 3, offset: 165},
								label: "d",
								expr: &litMatcher{
									pos:        position{line: 15, col: 5, offset: 167},
									val:        "d",
									ignoreCase: false,
								},
							},
							&andCodeExpr{
								pos: position{line: 15, col: 9, offset: 171},
								run: (*parser).callonA13,
							},
						},
					},
				},
			},
		},
		{
			name: "B",
			pos:  position{line: 20, col: 1, offset: 230},
			expr: &seqExpr{
				pos: position{line: 20, col: 5, offset: 236},
				exprs: []interface{}{
					&labeledExpr{
						pos:   position{line: 20, col: 5, offset: 236},
						label: "out",
						expr: &seqExpr{
							pos: position{line: 20, col: 11, offset: 242},
							exprs: []interface{}{
								&labeledExpr{
									pos:   position{line: 20, col: 11, offset: 242},
									label: "inner",
									expr: &seqExpr{
										pos: position{line: 20, col: 19, offset: 250},
										exprs: []interface{}{
											&charClassMatcher{
												pos:        position{line: 20, col: 19, offset: 250},
												val:        "[^abd]",
												chars:      []rune{'a', 'b', 'd'},
												ignoreCase: false,
												inverted:   true,
											},
											&labeledExpr{
												pos:   position{line: 20, col: 26, offset: 257},
												label: "innermost",
												expr: &anyMatcher{
													line: 20, col: 36, offset: 267,
												},
											},
											&andCodeExpr{
												pos: position{line: 20, col: 38, offset: 269},
												run: (*parser).callonB9,
											},
										},
									},
								},
								&andCodeExpr{
									pos: position{line: 20, col: 60, offset: 291},
									run: (*parser).callonB10,
								},
							},
						},
					},
					&andCodeExpr{
						pos: position{line: 20, col: 82, offset: 313},
						run: (*parser).callonB11,
					},
				},
			},
		},
		{
			name: "C",
			pos:  position{line: 22, col: 1, offset: 334},
			expr: &actionExpr{
				pos: position{line: 22, col: 5, offset: 340},
				run: (*parser).callonC1,
				expr: &seqExpr{
					pos: position{line: 22, col: 5, offset: 340},
					exprs: []interface{}{
						&andExpr{
							pos: position{line: 22, col: 5, offset: 340},
							expr: &labeledExpr{
								pos:   position{line: 22, col: 7, offset: 342},
								label: "inand",
								expr: &charClassMatcher{
									pos:        position{line: 22, col: 13, offset: 348},
									val:        "[efg]",
									chars:      []rune{'e', 'f', 'g'},
									ignoreCase: false,
									inverted:   false,
								},
							},
						},
						&labeledExpr{
							pos:   position{line: 22, col: 20, offset: 355},
							label: "rest",
							expr: &ruleRefExpr{
								pos:  position{line: 22, col: 25, offset: 360},
								name: "hij",
							},
						},
					},
				},
			},
		},
	},
}
+
// The onXxx methods below are the user-written predicate/action bodies
// from predicates.peg; each callonXxx is the generated thunk that reads
// the labeled values off the top of the variable stack and invokes the
// matching onXxx with them.

func (c *current) onA5(a interface{}) (bool, error) {
	fmt.Println(string(c.text))
	return true, nil
}

func (p *parser) callonA5() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onA5(stack["a"])
}

func (c *current) onA9(b interface{}) (bool, error) {
	fmt.Println(string(c.text))
	return true, nil
}

func (p *parser) callonA9() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onA9(stack["b"])
}

func (c *current) onA13(d interface{}) (bool, error) {
	fmt.Println(string(c.text))
	return true, nil
}

func (p *parser) callonA13() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onA13(stack["d"])
}

func (c *current) onB9(innermost interface{}) (bool, error) {
	return true, nil
}

func (p *parser) callonB9() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onB9(stack["innermost"])
}

func (c *current) onB10(inner interface{}) (bool, error) {
	return true, nil
}

func (p *parser) callonB10() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onB10(stack["inner"])
}

func (c *current) onB11(out interface{}) (bool, error) {
	return true, nil
}

func (p *parser) callonB11() (bool, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onB11(stack["out"])
}

func (c *current) onC1(rest interface{}) (interface{}, error) {
	return nil, nil
}

func (p *parser) callonC1() (interface{}, error) {
	stack := p.vstack[len(p.vstack)-1]
	_ = stack
	return p.cur.onC1(stack["rest"])
}
+
// Sentinel errors reported by the generated parser runtime.
var (
	// errNoRule is returned when the grammar to parse has no rule.
	errNoRule = errors.New("grammar has no rule")

	// errInvalidEncoding is returned when the source is not properly
	// utf8-encoded.
	errInvalidEncoding = errors.New("invalid encoding")

	// errNoMatch is returned if no match could be found.
	errNoMatch = errors.New("no match found")
)
+
// Option is a function that can set an option on the parser. It returns
// the previous setting as an Option, so a caller can apply it again later
// to restore the prior value.
type Option func(*parser) Option

// Debug creates an Option to set the debug flag to b. When set to true,
// debugging information is printed to stdout while parsing.
//
// The default is false.
func Debug(b bool) Option {
	return func(p *parser) Option {
		old := p.debug
		p.debug = b
		return Debug(old)
	}
}

// Memoize creates an Option to set the memoize flag to b. When set to true,
// the parser will cache all results so each expression is evaluated only
// once. This guarantees linear parsing time even for pathological cases,
// at the expense of more memory and slower times for typical cases.
//
// The default is false.
func Memoize(b bool) Option {
	return func(p *parser) Option {
		old := p.memoize
		p.memoize = b
		return Memoize(old)
	}
}

// Recover creates an Option to set the recover flag to b. When set to
// true, this causes the parser to recover from panics and convert it
// to an error. Setting it to false can be useful while debugging to
// access the full stack trace.
//
// The default is true.
func Recover(b bool) Option {
	return func(p *parser) Option {
		old := p.recover
		p.recover = b
		return Recover(old)
	}
}
+
// ParseFile parses the file identified by filename.
func ParseFile(filename string, opts ...Option) (interface{}, error) {
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return ParseReader(filename, f, opts...)
}

// ParseReader parses the data from r using filename as information in the
// error messages.
func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) {
	// the generated parser works on an in-memory byte slice, not a stream
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	return Parse(filename, b, opts...)
}

// Parse parses the data from b using filename as information in the
// error messages.
func Parse(filename string, b []byte, opts ...Option) (interface{}, error) {
	return newParser(filename, b, opts...).parse(g)
}
+
// position records a position in the text.
type position struct {
	line, col, offset int // line/column counters and byte offset into the input
}

// String renders the position as "line:col [offset]".
func (p position) String() string {
	return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset)
}

// savepoint stores all state required to go back to this point in the
// parser.
type savepoint struct {
	position
	rn rune // current rune (utf8.RuneError at EOF or on invalid encoding)
	w  int  // width in bytes of rn
}

// current is the value exposed to action and predicate code as "c": the
// start position and raw text of the expression being matched.
type current struct {
	pos  position // start position of the match
	text []byte   // raw text of the match
}
+
// the AST types...

// grammar is the root of the parser's AST; the first rule is the start rule.
type grammar struct {
	pos   position
	rules []*rule
}

// rule is a named grammar production. displayName, when non-empty, is used
// instead of name in error messages.
type rule struct {
	pos         position
	name        string
	displayName string
	expr        interface{}
}

// choiceExpr is an ordered choice: alternatives are tried in order and the
// first match wins.
type choiceExpr struct {
	pos          position
	alternatives []interface{}
}

// actionExpr runs the user code in run when expr matches.
type actionExpr struct {
	pos  position
	expr interface{}
	run  func(*parser) (interface{}, error)
}

// seqExpr matches all of exprs in sequence, or fails as a whole.
type seqExpr struct {
	pos   position
	exprs []interface{}
}

// labeledExpr binds the value matched by expr to label, making it
// available to action and predicate code.
type labeledExpr struct {
	pos   position
	label string
	expr  interface{}
}

// expr is the common shape of the single-operand expressions aliased below.
type expr struct {
	pos  position
	expr interface{}
}

type andExpr expr        // &e positive lookahead (consumes no input)
type notExpr expr        // !e negative lookahead (consumes no input)
type zeroOrOneExpr expr  // e?
type zeroOrMoreExpr expr // e*
type oneOrMoreExpr expr  // e+

// ruleRefExpr references another rule by name.
type ruleRefExpr struct {
	pos  position
	name string
}

// andCodeExpr is an &{...} code predicate: matches when run returns true.
type andCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// notCodeExpr is a !{...} code predicate: matches when run returns false.
type notCodeExpr struct {
	pos position
	run func(*parser) (bool, error)
}

// litMatcher matches the literal string val, optionally case-insensitively.
type litMatcher struct {
	pos        position
	val        string
	ignoreCase bool
}

// charClassMatcher matches one rune against individual chars, inclusive
// range pairs stored flat in ranges, and Unicode classes. inverted flips
// the result.
type charClassMatcher struct {
	pos        position
	val        string
	chars      []rune
	ranges     []rune
	classes    []*unicode.RangeTable
	ignoreCase bool
	inverted   bool
}

// anyMatcher matches any single rune (the "." expression).
type anyMatcher position
+
// errList cumulates the errors found by the parser.
type errList []error

// add appends err to the list.
func (e *errList) add(err error) {
	*e = append(*e, err)
}

// err returns the list as an error value: nil when empty, otherwise the
// deduplicated list itself.
func (e errList) err() error {
	if len(e) == 0 {
		return nil
	}
	e.dedupe()
	return e
}

// dedupe drops errors whose message duplicates an earlier one, keeping the
// first occurrence of each message in order.
func (e *errList) dedupe() {
	seen := make(map[string]bool, len(*e))
	var kept []error
	for _, err := range *e {
		msg := err.Error()
		if seen[msg] {
			continue
		}
		seen[msg] = true
		kept = append(kept, err)
	}
	*e = kept
}

// Error implements the error interface, joining the individual messages
// with newlines.
func (e errList) Error() string {
	if len(e) == 0 {
		return ""
	}
	if len(e) == 1 {
		return e[0].Error()
	}
	msgs := make([]string, len(e))
	for i, err := range e {
		msgs[i] = err.Error()
	}
	return strings.Join(msgs, "\n")
}
+
// parserError wraps an error with a prefix indicating the rule in which
// the error occurred. The original error is stored in the Inner field.
type parserError struct {
	Inner  error
	pos    position // position at which the error was recorded
	prefix string   // "filename:line:col (offset): rule name" context
}

// Error returns the error message: the context prefix followed by the
// inner error's message.
func (p *parserError) Error() string {
	return p.prefix + ": " + p.Inner.Error()
}
+
// newParser creates a parser with the specified input source and options.
func newParser(filename string, b []byte, opts ...Option) *parser {
	p := &parser{
		filename: filename,
		errs:     new(errList),
		data:     b,
		pt:       savepoint{position: position{line: 1}}, // before the first rune is read
		recover:  true,                                   // panics become errors unless Recover(false) is passed
	}
	p.setOptions(opts)
	return p
}

// setOptions applies the options to the parser.
func (p *parser) setOptions(opts []Option) {
	for _, opt := range opts {
		opt(p)
	}
}
+
// resultTuple is a memoized parse outcome: the produced value, whether the
// expression matched, and the savepoint to restore to on a cache hit.
type resultTuple struct {
	v   interface{}
	b   bool
	end savepoint
}

// parser is the state machine for the pigeon-generated parser.
type parser struct {
	filename string    // used only as a prefix in error messages
	pt       savepoint // current position (and current rune) in data
	cur      current   // exposed to action/predicate code as "c"

	data []byte
	errs *errList

	recover bool // convert panics raised while parsing into errors
	debug   bool // print a trace of the parse to stdout
	depth   int  // indentation level for the debug trace

	memoize bool
	// memoization table for the packrat algorithm:
	// map[offset in source] map[expression or rule] {value, match}
	memo map[int]map[interface{}]resultTuple

	// rules table, maps the rule identifier to the rule node
	rules map[string]*rule
	// variables stack, map of label to value
	vstack []map[string]interface{}
	// rule stack, allows identification of the current rule in errors
	rstack []*rule

	// stats
	exprCnt int
}
+
// push a variable set on the vstack. Slots (and their maps) are reused
// across pushes to avoid reallocating on every expression.
func (p *parser) pushV() {
	if cap(p.vstack) == len(p.vstack) {
		// create new empty slot in the stack
		p.vstack = append(p.vstack, nil)
	} else {
		// slice to 1 more
		p.vstack = p.vstack[:len(p.vstack)+1]
	}

	// get the last args set
	m := p.vstack[len(p.vstack)-1]
	if m != nil && len(m) == 0 {
		// empty map, all good
		return
	}

	// slot was nil, or still holds labels from a previous use: start fresh
	m = make(map[string]interface{})
	p.vstack[len(p.vstack)-1] = m
}

// pop a variable set from the vstack.
func (p *parser) popV() {
	// if the map is not empty, clear it
	m := p.vstack[len(p.vstack)-1]
	if len(m) > 0 {
		// GC that map
		p.vstack[len(p.vstack)-1] = nil
	}
	p.vstack = p.vstack[:len(p.vstack)-1]
}
+
// print emits one line of the debug trace (no-op unless p.debug) and
// returns s unchanged so it can wrap in/out calls.
func (p *parser) print(prefix, s string) string {
	if !p.debug {
		return s
	}

	fmt.Printf("%s %d:%d:%d: %s [%#U]\n",
		prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn)
	return s
}

// in increases the trace depth and prints an entry (">") line.
func (p *parser) in(s string) string {
	p.depth++
	return p.print(strings.Repeat(" ", p.depth)+">", s)
}

// out decreases the trace depth and prints an exit ("<") line.
func (p *parser) out(s string) string {
	p.depth--
	return p.print(strings.Repeat(" ", p.depth)+"<", s)
}
+
// addErr records err at the parser's current position.
func (p *parser) addErr(err error) {
	p.addErrAt(err, p.pt.position)
}

// addErrAt records err at pos, building a "filename:line:col (offset):
// rule name" prefix from the innermost rule on rstack, and appends the
// wrapped error to p.errs.
func (p *parser) addErrAt(err error, pos position) {
	var buf bytes.Buffer
	if p.filename != "" {
		buf.WriteString(p.filename)
	}
	if buf.Len() > 0 {
		buf.WriteString(":")
	}
	buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset))
	if len(p.rstack) > 0 {
		if buf.Len() > 0 {
			buf.WriteString(": ")
		}
		// prefer the rule's display name when the grammar provided one
		rule := p.rstack[len(p.rstack)-1]
		if rule.displayName != "" {
			buf.WriteString("rule " + rule.displayName)
		} else {
			buf.WriteString("rule " + rule.name)
		}
	}
	pe := &parserError{Inner: err, prefix: buf.String()}
	p.errs.add(pe)
}
+
// read advances the parser to the next rune, updating line/col tracking.
func (p *parser) read() {
	p.pt.offset += p.pt.w
	rn, n := utf8.DecodeRune(p.data[p.pt.offset:])
	p.pt.rn = rn
	p.pt.w = n
	p.pt.col++
	if rn == '\n' {
		p.pt.line++
		p.pt.col = 0
	}

	if rn == utf8.RuneError {
		if n > 0 {
			// n > 0 means an invalid UTF-8 sequence; n == 0 is plain EOF,
			// which is not an error.
			p.addErr(errInvalidEncoding)
		}
	}
}
+
// restore parser position to the savepoint pt (no-op if already there).
func (p *parser) restore(pt savepoint) {
	if p.debug {
		defer p.out(p.in("restore"))
	}
	if pt.offset == p.pt.offset {
		return
	}
	p.pt = pt
}

// get the slice of bytes from the savepoint start to the current position.
func (p *parser) sliceFrom(start savepoint) []byte {
	return p.data[start.position.offset:p.pt.position.offset]
}
+
// getMemoized looks up a cached result for node at the current offset.
func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
	if len(p.memo) == 0 {
		return resultTuple{}, false
	}
	m := p.memo[p.pt.offset]
	if len(m) == 0 {
		return resultTuple{}, false
	}
	res, ok := m[node]
	return res, ok
}

// setMemoized caches tuple for node at the savepoint's offset, lazily
// allocating the two map levels.
func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
	if p.memo == nil {
		p.memo = make(map[int]map[interface{}]resultTuple)
	}
	m := p.memo[pt.offset]
	if m == nil {
		m = make(map[interface{}]resultTuple)
		p.memo[pt.offset] = m
	}
	m[node] = tuple
}

// buildRulesTable indexes the grammar's rules by name for ruleRefExpr lookups.
func (p *parser) buildRulesTable(g *grammar) {
	p.rules = make(map[string]*rule, len(g.rules))
	for _, r := range g.rules {
		p.rules[r.name] = r
	}
}
+
// parse runs the parser over grammar g and returns the value produced by
// the start rule (g.rules[0]) together with any accumulated errors.
func (p *parser) parse(g *grammar) (val interface{}, err error) {
	if len(g.rules) == 0 {
		p.addErr(errNoRule)
		return nil, p.errs.err()
	}

	// TODO : not super critical but this could be generated
	p.buildRulesTable(g)

	if p.recover {
		// panic can be used in action code to stop parsing immediately
		// and return the panic as an error.
		defer func() {
			if e := recover(); e != nil {
				if p.debug {
					defer p.out(p.in("panic handler"))
				}
				val = nil
				switch e := e.(type) {
				case error:
					p.addErr(e)
				default:
					p.addErr(fmt.Errorf("%v", e))
				}
				err = p.errs.err()
			}
		}()
	}

	// start rule is rule [0]
	p.read() // advance to first rune
	val, ok := p.parseRule(g.rules[0])
	if !ok {
		if len(*p.errs) == 0 {
			// make sure this doesn't go out silently
			p.addErr(errNoMatch)
		}
		return nil, p.errs.err()
	}
	return val, p.errs.err()
}
+
// parseRule parses a single rule, consulting and updating the memoization
// table when enabled, and pushing the rule on rstack so errors can name it.
func (p *parser) parseRule(rule *rule) (interface{}, bool) {
	if p.debug {
		defer p.out(p.in("parseRule " + rule.name))
	}

	if p.memoize {
		res, ok := p.getMemoized(rule)
		if ok {
			// cache hit: jump straight to where this rule previously ended
			p.restore(res.end)
			return res.v, res.b
		}
	}

	start := p.pt
	p.rstack = append(p.rstack, rule)
	p.pushV()
	val, ok := p.parseExpr(rule.expr)
	p.popV()
	p.rstack = p.rstack[:len(p.rstack)-1]
	if ok && p.debug {
		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
	}

	if p.memoize {
		p.setMemoized(start, rule, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
// parseExpr dispatches on the concrete expression type; when memoization
// is enabled, results are cached keyed by (start offset, expression node).
func (p *parser) parseExpr(expr interface{}) (interface{}, bool) {
	var pt savepoint
	var ok bool

	if p.memoize {
		res, ok := p.getMemoized(expr)
		if ok {
			p.restore(res.end)
			return res.v, res.b
		}
		pt = p.pt
	}

	p.exprCnt++
	var val interface{}
	switch expr := expr.(type) {
	case *actionExpr:
		val, ok = p.parseActionExpr(expr)
	case *andCodeExpr:
		val, ok = p.parseAndCodeExpr(expr)
	case *andExpr:
		val, ok = p.parseAndExpr(expr)
	case *anyMatcher:
		val, ok = p.parseAnyMatcher(expr)
	case *charClassMatcher:
		val, ok = p.parseCharClassMatcher(expr)
	case *choiceExpr:
		val, ok = p.parseChoiceExpr(expr)
	case *labeledExpr:
		val, ok = p.parseLabeledExpr(expr)
	case *litMatcher:
		val, ok = p.parseLitMatcher(expr)
	case *notCodeExpr:
		val, ok = p.parseNotCodeExpr(expr)
	case *notExpr:
		val, ok = p.parseNotExpr(expr)
	case *oneOrMoreExpr:
		val, ok = p.parseOneOrMoreExpr(expr)
	case *ruleRefExpr:
		val, ok = p.parseRuleRefExpr(expr)
	case *seqExpr:
		val, ok = p.parseSeqExpr(expr)
	case *zeroOrMoreExpr:
		val, ok = p.parseZeroOrMoreExpr(expr)
	case *zeroOrOneExpr:
		val, ok = p.parseZeroOrOneExpr(expr)
	default:
		// unreachable for generator-produced grammars
		panic(fmt.Sprintf("unknown expression type %T", expr))
	}
	if p.memoize {
		p.setMemoized(pt, expr, resultTuple{val, ok, p.pt})
	}
	return val, ok
}
+
+// parseActionExpr parses the wrapped expression and, on success, runs
+// the user's action code with p.cur positioned at the matched text. The
+// action's return value replaces the raw match as the expression value;
+// an action error is recorded at the start position but does not fail
+// the match.
+func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseActionExpr"))
+	}
+
+	start := p.pt
+	val, ok := p.parseExpr(act.expr)
+	if ok {
+		p.cur.pos = start.position
+		p.cur.text = p.sliceFrom(start)
+		actVal, err := act.run(p)
+		if err != nil {
+			p.addErrAt(err, start.position)
+		}
+		val = actVal
+	}
+	if ok && p.debug {
+		p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start)))
+	}
+	return val, ok
+}
+
+// parseAndCodeExpr evaluates a code-based and-predicate (&{...}): the
+// user code decides the match without consuming any input. A returned
+// error is recorded but the boolean result still drives the match.
+func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAndCodeExpr"))
+	}
+
+	ok, err := and.run(p)
+	if err != nil {
+		p.addErr(err)
+	}
+	return nil, ok
+}
+
+// parseAndExpr evaluates an and-predicate (&expr): the inner expression
+// is tried and the position is then restored, so the predicate matches
+// without consuming input and produces no value.
+func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAndExpr"))
+	}
+
+	pt := p.pt
+	p.pushV()
+	_, ok := p.parseExpr(and.expr)
+	p.popV()
+	p.restore(pt)
+	return nil, ok
+}
+
+// parseAnyMatcher matches any single rune ("." in the grammar) except
+// at end of input. utf8.RuneError appears to be used as the EOF
+// sentinel here — NOTE(review): a literal U+FFFD in the input would
+// therefore also fail to match; confirm this is acceptable upstream.
+func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseAnyMatcher"))
+	}
+
+	if p.pt.rn != utf8.RuneError {
+		start := p.pt
+		p.read()
+		return p.sliceFrom(start), true
+	}
+	return nil, false
+}
+
+// parseCharClassMatcher matches the current rune against a character
+// class ([...] in the grammar): individual chars, rune ranges, then
+// Unicode classes, in that order. When chr.inverted is set, any hit in
+// those lists is a failure and a miss everywhere consumes the rune.
+func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseCharClassMatcher"))
+	}
+
+	cur := p.pt.rn
+	// can't match EOF
+	if cur == utf8.RuneError {
+		return nil, false
+	}
+	start := p.pt
+	if chr.ignoreCase {
+		cur = unicode.ToLower(cur)
+	}
+
+	// try to match in the list of available chars
+	for _, rn := range chr.chars {
+		if rn == cur {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of ranges
+	for i := 0; i < len(chr.ranges); i += 2 {
+		// ranges is a flat list of [low, high] pairs, inclusive.
+		if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	// try to match in the list of Unicode classes
+	for _, cl := range chr.classes {
+		if unicode.Is(cl, cur) {
+			if chr.inverted {
+				return nil, false
+			}
+			p.read()
+			return p.sliceFrom(start), true
+		}
+	}
+
+	if chr.inverted {
+		p.read()
+		return p.sliceFrom(start), true
+	}
+	return nil, false
+}
+
+// parseChoiceExpr tries each alternative of an ordered choice ("/" in
+// the grammar) in turn and returns the first one that matches.
+func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseChoiceExpr"))
+	}
+
+	for _, alt := range ch.alternatives {
+		p.pushV()
+		val, ok := p.parseExpr(alt)
+		p.popV()
+		if ok {
+			return val, ok
+		}
+	}
+	return nil, false
+}
+
+// parseLabeledExpr parses the wrapped expression and, on success,
+// stores its value under lab.label in the top variable-stack frame so
+// action/predicate code can refer to it by name.
+func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseLabeledExpr"))
+	}
+
+	p.pushV()
+	val, ok := p.parseExpr(lab.expr)
+	p.popV()
+	if ok && lab.label != "" {
+		m := p.vstack[len(p.vstack)-1]
+		m[lab.label] = val
+	}
+	return val, ok
+}
+
+// parseLitMatcher matches the literal string lit.val rune by rune,
+// restoring the start position on any mismatch. When ignoreCase is set
+// only the input rune is lowercased — presumably lit.val is stored
+// pre-lowercased by the generator; TODO confirm.
+func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseLitMatcher"))
+	}
+
+	start := p.pt
+	for _, want := range lit.val {
+		cur := p.pt.rn
+		if lit.ignoreCase {
+			cur = unicode.ToLower(cur)
+		}
+		if cur != want {
+			p.restore(start)
+			return nil, false
+		}
+		p.read()
+	}
+	return p.sliceFrom(start), true
+}
+
+// parseNotCodeExpr evaluates a code-based not-predicate (!{...}): the
+// user code's result is negated and no input is consumed. A returned
+// error is recorded but does not change the negation.
+func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseNotCodeExpr"))
+	}
+
+	ok, err := not.run(p)
+	if err != nil {
+		p.addErr(err)
+	}
+	return nil, !ok
+}
+
+// parseNotExpr evaluates a not-predicate (!expr): the inner expression
+// is tried, the position restored, and the result negated — succeeding
+// only when the inner expression does NOT match, consuming nothing.
+func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseNotExpr"))
+	}
+
+	pt := p.pt
+	p.pushV()
+	_, ok := p.parseExpr(not.expr)
+	p.popV()
+	p.restore(pt)
+	return nil, !ok
+}
+
+// parseOneOrMoreExpr repeatedly parses the wrapped expression ("+" in
+// the grammar), collecting each value; it fails only if the first
+// attempt does not match.
+func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseOneOrMoreExpr"))
+	}
+
+	var vals []interface{}
+
+	for {
+		p.pushV()
+		val, ok := p.parseExpr(expr.expr)
+		p.popV()
+		if !ok {
+			if len(vals) == 0 {
+				// did not match once, no match
+				return nil, false
+			}
+			return vals, true
+		}
+		vals = append(vals, val)
+	}
+}
+
+// parseRuleRefExpr resolves a rule reference by name via p.rules (built
+// by buildRulesTable) and parses the referenced rule. A missing name is
+// a generator bug and panics; an undefined rule is reported as a parse
+// error instead.
+func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseRuleRefExpr " + ref.name))
+	}
+
+	if ref.name == "" {
+		panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos))
+	}
+
+	rule := p.rules[ref.name]
+	if rule == nil {
+		p.addErr(fmt.Errorf("undefined rule: %s", ref.name))
+		return nil, false
+	}
+	return p.parseRule(rule)
+}
+
+// parseSeqExpr parses a sequence of expressions; all must match in
+// order. On any failure the whole sequence backtracks to its start
+// position and fails.
+func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseSeqExpr"))
+	}
+
+	var vals []interface{}
+
+	pt := p.pt
+	for _, expr := range seq.exprs {
+		val, ok := p.parseExpr(expr)
+		if !ok {
+			p.restore(pt)
+			return nil, false
+		}
+		vals = append(vals, val)
+	}
+	return vals, true
+}
+
+// parseZeroOrMoreExpr repeatedly parses the wrapped expression ("*" in
+// the grammar) until it fails; it always succeeds, returning the
+// collected values (nil slice when nothing matched).
+func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseZeroOrMoreExpr"))
+	}
+
+	var vals []interface{}
+
+	for {
+		p.pushV()
+		val, ok := p.parseExpr(expr.expr)
+		p.popV()
+		if !ok {
+			return vals, true
+		}
+		vals = append(vals, val)
+	}
+}
+
+// parseZeroOrOneExpr parses an optional expression ("?" in the
+// grammar); it always succeeds, returning the inner value on a match
+// and nil otherwise.
+func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) {
+	if p.debug {
+		defer p.out(p.in("parseZeroOrOneExpr"))
+	}
+
+	p.pushV()
+	val, _ := p.parseExpr(expr.expr)
+	p.popV()
+	// whether it matched or not, consider it a match
+	return val, true
+}
+
+// rangeTable resolves a Unicode class name to its *unicode.RangeTable,
+// checking Categories, then Properties, then Scripts. The panic is
+// considered unreachable — presumably the generator validates class
+// names before emitting code that calls this.
+func rangeTable(class string) *unicode.RangeTable {
+	if rt, ok := unicode.Categories[class]; ok {
+		return rt
+	}
+	if rt, ok := unicode.Properties[class]; ok {
+		return rt
+	}
+	if rt, ok := unicode.Scripts[class]; ok {
+		return rt
+	}
+
+	// cannot happen
+	panic(fmt.Sprintf("invalid Unicode class: %s", class))
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.peg b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.peg
new file mode 100644
index 0000000000..90ecc59592
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates.peg
@@ -0,0 +1,24 @@
+{
+package predicates
+}
+
+A ← a:'a' !{
+ fmt.Println(string(c.text))
+ return true, nil
+}
+
+/ b:'b' !{
+ fmt.Println(string(c.text))
+ return true, nil
+}
+
+/ d:'d' &{
+ fmt.Println(string(c.text))
+ return true, nil
+}
+
+B ← out:( inner:( [^abd] innermost:. &{return true, nil} ) &{return true, nil} ) &{return true, nil}
+
+C ← &(inand:[efg]) rest:hij {
+ return nil, nil
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates_test.go b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates_test.go
new file mode 100644
index 0000000000..e2607205a3
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/test/predicates/predicates_test.go
@@ -0,0 +1,31 @@
+package predicates
+
+import (
+ "reflect"
+ "testing"
+)
+
+// TestPredicatesArgs checks via reflection that the generated *current
+// methods for the grammar's predicate/action blocks exist and accept
+// the expected number of arguments (the listed labels plus the
+// receiver). Method names like onA5 appear to encode the rule name and
+// its position in predicates.peg — editing that file shifts these names.
+func TestPredicatesArgs(t *testing.T) {
+	methods := map[string][]string{
+		"onA5":  {"a"},
+		"onA9":  {"b"},
+		"onA13": {"d"},
+		"onB9":  {"innermost"},
+		"onB10": {"inner"},
+		"onB11": {"out"},
+		"onC1":  {"rest"},
+	}
+
+	typ := reflect.TypeOf(&current{})
+	for nm, args := range methods {
+		meth, ok := typ.MethodByName(nm)
+		if !ok {
+			t.Errorf("want *current to have method %s", nm)
+			continue
+		}
+		// NumIn counts the receiver, hence len(args)+1.
+		if n := meth.Func.Type().NumIn(); n != len(args)+1 {
+			t.Errorf("%q: want %d arguments, got %d", nm, len(args)+1, n)
+			continue
+		}
+	}
+}
diff --git a/vendor/github.com/PuerkitoBio/pigeon/unicode_classes.go b/vendor/github.com/PuerkitoBio/pigeon/unicode_classes.go
new file mode 100644
index 0000000000..3600156296
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/pigeon/unicode_classes.go
@@ -0,0 +1,200 @@
+// This file is generated by the misc/cmd/unicode-classes tool.
+// Do not edit.
+
+package main
+
+var unicodeClasses = map[string]bool{
+ "ASCII_Hex_Digit": true,
+ "Arabic": true,
+ "Armenian": true,
+ "Avestan": true,
+ "Balinese": true,
+ "Bamum": true,
+ "Bassa_Vah": true,
+ "Batak": true,
+ "Bengali": true,
+ "Bidi_Control": true,
+ "Bopomofo": true,
+ "Brahmi": true,
+ "Braille": true,
+ "Buginese": true,
+ "Buhid": true,
+ "C": true,
+ "Canadian_Aboriginal": true,
+ "Carian": true,
+ "Caucasian_Albanian": true,
+ "Cc": true,
+ "Cf": true,
+ "Chakma": true,
+ "Cham": true,
+ "Cherokee": true,
+ "Co": true,
+ "Common": true,
+ "Coptic": true,
+ "Cs": true,
+ "Cuneiform": true,
+ "Cypriot": true,
+ "Cyrillic": true,
+ "Dash": true,
+ "Deprecated": true,
+ "Deseret": true,
+ "Devanagari": true,
+ "Diacritic": true,
+ "Duployan": true,
+ "Egyptian_Hieroglyphs": true,
+ "Elbasan": true,
+ "Ethiopic": true,
+ "Extender": true,
+ "Georgian": true,
+ "Glagolitic": true,
+ "Gothic": true,
+ "Grantha": true,
+ "Greek": true,
+ "Gujarati": true,
+ "Gurmukhi": true,
+ "Han": true,
+ "Hangul": true,
+ "Hanunoo": true,
+ "Hebrew": true,
+ "Hex_Digit": true,
+ "Hiragana": true,
+ "Hyphen": true,
+ "IDS_Binary_Operator": true,
+ "IDS_Trinary_Operator": true,
+ "Ideographic": true,
+ "Imperial_Aramaic": true,
+ "Inherited": true,
+ "Inscriptional_Pahlavi": true,
+ "Inscriptional_Parthian": true,
+ "Javanese": true,
+ "Join_Control": true,
+ "Kaithi": true,
+ "Kannada": true,
+ "Katakana": true,
+ "Kayah_Li": true,
+ "Kharoshthi": true,
+ "Khmer": true,
+ "Khojki": true,
+ "Khudawadi": true,
+ "L": true,
+ "Lao": true,
+ "Latin": true,
+ "Lepcha": true,
+ "Limbu": true,
+ "Linear_A": true,
+ "Linear_B": true,
+ "Lisu": true,
+ "Ll": true,
+ "Lm": true,
+ "Lo": true,
+ "Logical_Order_Exception": true,
+ "Lt": true,
+ "Lu": true,
+ "Lycian": true,
+ "Lydian": true,
+ "M": true,
+ "Mahajani": true,
+ "Malayalam": true,
+ "Mandaic": true,
+ "Manichaean": true,
+ "Mc": true,
+ "Me": true,
+ "Meetei_Mayek": true,
+ "Mende_Kikakui": true,
+ "Meroitic_Cursive": true,
+ "Meroitic_Hieroglyphs": true,
+ "Miao": true,
+ "Mn": true,
+ "Modi": true,
+ "Mongolian": true,
+ "Mro": true,
+ "Myanmar": true,
+ "N": true,
+ "Nabataean": true,
+ "Nd": true,
+ "New_Tai_Lue": true,
+ "Nko": true,
+ "Nl": true,
+ "No": true,
+ "Noncharacter_Code_Point": true,
+ "Ogham": true,
+ "Ol_Chiki": true,
+ "Old_Italic": true,
+ "Old_North_Arabian": true,
+ "Old_Permic": true,
+ "Old_Persian": true,
+ "Old_South_Arabian": true,
+ "Old_Turkic": true,
+ "Oriya": true,
+ "Osmanya": true,
+ "Other_Alphabetic": true,
+ "Other_Default_Ignorable_Code_Point": true,
+ "Other_Grapheme_Extend": true,
+ "Other_ID_Continue": true,
+ "Other_ID_Start": true,
+ "Other_Lowercase": true,
+ "Other_Math": true,
+ "Other_Uppercase": true,
+ "P": true,
+ "Pahawh_Hmong": true,
+ "Palmyrene": true,
+ "Pattern_Syntax": true,
+ "Pattern_White_Space": true,
+ "Pau_Cin_Hau": true,
+ "Pc": true,
+ "Pd": true,
+ "Pe": true,
+ "Pf": true,
+ "Phags_Pa": true,
+ "Phoenician": true,
+ "Pi": true,
+ "Po": true,
+ "Ps": true,
+ "Psalter_Pahlavi": true,
+ "Quotation_Mark": true,
+ "Radical": true,
+ "Rejang": true,
+ "Runic": true,
+ "S": true,
+ "STerm": true,
+ "Samaritan": true,
+ "Saurashtra": true,
+ "Sc": true,
+ "Sharada": true,
+ "Shavian": true,
+ "Siddham": true,
+ "Sinhala": true,
+ "Sk": true,
+ "Sm": true,
+ "So": true,
+ "Soft_Dotted": true,
+ "Sora_Sompeng": true,
+ "Sundanese": true,
+ "Syloti_Nagri": true,
+ "Syriac": true,
+ "Tagalog": true,
+ "Tagbanwa": true,
+ "Tai_Le": true,
+ "Tai_Tham": true,
+ "Tai_Viet": true,
+ "Takri": true,
+ "Tamil": true,
+ "Telugu": true,
+ "Terminal_Punctuation": true,
+ "Thaana": true,
+ "Thai": true,
+ "Tibetan": true,
+ "Tifinagh": true,
+ "Tirhuta": true,
+ "Ugaritic": true,
+ "Unified_Ideograph": true,
+ "Vai": true,
+ "Variation_Selector": true,
+ "Warang_Citi": true,
+ "White_Space": true,
+ "Yi": true,
+ "Z": true,
+ "Zl": true,
+ "Zp": true,
+ "Zs": true,
+}
diff --git a/vendor/github.com/armon/consul-api/.gitignore b/vendor/github.com/armon/consul-api/.gitignore
new file mode 100644
index 0000000000..836562412f
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/github.com/armon/consul-api/LICENSE b/vendor/github.com/armon/consul-api/LICENSE
new file mode 100644
index 0000000000..f0e5c79e18
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/armon/consul-api/README.md b/vendor/github.com/armon/consul-api/README.md
new file mode 100644
index 0000000000..c95d9dee33
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/README.md
@@ -0,0 +1,42 @@
+consul-api
+==========
+
+*DEPRECATED* Please use [consul api package](https://github.com/hashicorp/consul/tree/master/api) instead.
+Godocs for that package [are here](http://godoc.org/github.com/hashicorp/consul/api).
+
+This package provides the `consulapi` package which attempts to
+provide programmatic access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.4 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/armon/consul-api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client, with KV endpoints
+client, _ := consulapi.NewClient(consulapi.DefaultConfig())
+kv := client.KV()
+
+// PUT a new KV pair
+p := &consulapi.KVPair{Key: "foo", Value: []byte("test")}
+_, err := kv.Put(p, nil)
+if err != nil {
+ panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+ panic(err)
+}
+fmt.Printf("KV: %v", pair)
+
+```
+
diff --git a/vendor/github.com/armon/consul-api/acl.go b/vendor/github.com/armon/consul-api/acl.go
new file mode 100644
index 0000000000..e0179f54df
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/acl.go
@@ -0,0 +1,140 @@
+package consulapi
+
+const (
+ // ACLClientType is the client type token
+ ACLClientType = "client"
+
+ // ACLManagementType is the management type token
+ ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+ CreateIndex uint64
+ ModifyIndex uint64
+ ID string
+ Name string
+ Type string
+ Rules string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+ c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+ return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/create")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/update")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/list")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/acl_test.go b/vendor/github.com/armon/consul-api/acl_test.go
new file mode 100644
index 0000000000..7932c5905a
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/acl_test.go
@@ -0,0 +1,140 @@
+package consulapi
+
+import (
+ "os"
+ "testing"
+)
+
+// ROOT is a management token for the tests
+var CONSUL_ROOT string
+
+func init() {
+ CONSUL_ROOT = os.Getenv("CONSUL_ROOT")
+}
+
+func TestACL_CreateDestroy(t *testing.T) {
+ if CONSUL_ROOT == "" {
+ t.SkipNow()
+ }
+ c := makeClient(t)
+ c.config.Token = CONSUL_ROOT
+ acl := c.ACL()
+
+ ae := ACLEntry{
+ Name: "API test",
+ Type: ACLClientType,
+ Rules: `key "" { policy = "deny" }`,
+ }
+
+ id, wm, err := acl.Create(&ae, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ ae2, _, err := acl.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if ae2.Name != ae.Name || ae2.Type != ae.Type || ae2.Rules != ae.Rules {
+ t.Fatalf("Bad: %#v", ae2)
+ }
+
+ wm, err = acl.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+}
+
+func TestACL_CloneDestroy(t *testing.T) {
+ if CONSUL_ROOT == "" {
+ t.SkipNow()
+ }
+ c := makeClient(t)
+ c.config.Token = CONSUL_ROOT
+ acl := c.ACL()
+
+ id, wm, err := acl.Clone(CONSUL_ROOT, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ wm, err = acl.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if wm.RequestTime == 0 {
+ t.Fatalf("bad: %v", wm)
+ }
+}
+
+func TestACL_Info(t *testing.T) {
+ if CONSUL_ROOT == "" {
+ t.SkipNow()
+ }
+ c := makeClient(t)
+ c.config.Token = CONSUL_ROOT
+ acl := c.ACL()
+
+ ae, qm, err := acl.Info(CONSUL_ROOT, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+
+ if ae == nil || ae.ID != CONSUL_ROOT || ae.Type != ACLManagementType {
+ t.Fatalf("bad: %#v", ae)
+ }
+}
+
+func TestACL_List(t *testing.T) {
+ if CONSUL_ROOT == "" {
+ t.SkipNow()
+ }
+ c := makeClient(t)
+ c.config.Token = CONSUL_ROOT
+ acl := c.ACL()
+
+ acls, qm, err := acl.List(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(acls) < 2 {
+ t.Fatalf("bad: %v", acls)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/agent.go b/vendor/github.com/armon/consul-api/agent.go
new file mode 100644
index 0000000000..eec93cb970
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/agent.go
@@ -0,0 +1,272 @@
+package consulapi
+
+import (
+ "fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+ ID string
+ Service string
+ Tags []string
+ Port int
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+ Name string
+ Addr string
+ Port uint16
+ Tags map[string]string
+ Status int
+ ProtocolMin uint8
+ ProtocolMax uint8
+ ProtocolCur uint8
+ DelegateMin uint8
+ DelegateMax uint8
+ DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Tags []string `json:",omitempty"`
+ Port int `json:",omitempty"`
+ Check *AgentServiceCheck
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Notes string `json:",omitempty"`
+ AgentServiceCheck
+}
+
+// AgentServiceCheck is used to create an associated
+// check for a service
+type AgentServiceCheck struct {
+ Script string `json:",omitempty"`
+ Interval string `json:",omitempty"`
+ TTL string `json:",omitempty"`
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+ c *Client
+
+ // cache the node name
+ nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+ return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+ r := a.c.newRequest("GET", "/v1/agent/self")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]map[string]interface{}
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+ if a.nodeName != "" {
+ return a.nodeName, nil
+ }
+ info, err := a.Self()
+ if err != nil {
+ return "", err
+ }
+ name := info["Config"]["NodeName"].(string)
+ a.nodeName = name
+ return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+ r := a.c.newRequest("GET", "/v1/agent/checks")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+ r := a.c.newRequest("GET", "/v1/agent/services")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/register")
+ r.obj = service
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state
+func (a *Agent) PassTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state
+func (a *Agent) WarnTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state
+func (a *Agent) FailTTL(checkID, note string) error {
+ return a.UpdateTTL(checkID, note, "fail")
+}
+
+// UpdateTTL is used to update the TTL of a check
+func (a *Agent) UpdateTTL(checkID, note, status string) error {
+ switch status {
+ case "pass":
+ case "warn":
+ case "fail":
+ default:
+ return fmt.Errorf("Invalid status: %s", status)
+ }
+ endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+ r := a.c.newRequest("PUT", endpoint)
+ r.params.Set("note", note)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/register")
+ r.obj = check
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+ r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/armon/consul-api/agent_test.go b/vendor/github.com/armon/consul-api/agent_test.go
new file mode 100644
index 0000000000..8d97af4af5
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/agent_test.go
@@ -0,0 +1,162 @@
+package consulapi
+
+import (
+ "testing"
+)
+
+func TestAgent_Self(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ name := info["Config"]["NodeName"]
+ if name == "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
+
+func TestAgent_Members(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ members, err := agent.Members(false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(members) != 1 {
+ t.Fatalf("bad: %v", members)
+ }
+}
+
+func TestAgent_Services(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Tags: []string{"bar", "baz"},
+ Port: 8000,
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ services, err := agent.Services()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := services["foo"]; !ok {
+ t.Fatalf("missing service: %v", services)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := checks["service:foo"]; !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_SetTTLStatus(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if err := agent.WarnTTL("service:foo", "test"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ chk, ok := checks["service:foo"]
+ if !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+ if chk.Status != "warning" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+ if chk.Output != "test" {
+ t.Fatalf("Bad: %#v", chk)
+ }
+
+ if err := agent.ServiceDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Checks(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ reg := &AgentCheckRegistration{
+ Name: "foo",
+ }
+ reg.TTL = "15s"
+ if err := agent.CheckRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ checks, err := agent.Checks()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if _, ok := checks["foo"]; !ok {
+ t.Fatalf("missing check: %v", checks)
+ }
+
+ if err := agent.CheckDeregister("foo"); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_Join(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Join ourself
+ addr := info["Config"]["AdvertiseAddr"].(string)
+ err = agent.Join(addr, false)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
+
+func TestAgent_ForceLeave(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+
+ // Eject somebody
+ err := agent.ForceLeave("foo")
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/api.go b/vendor/github.com/armon/consul-api/api.go
new file mode 100644
index 0000000000..e1335769b7
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/api.go
@@ -0,0 +1,323 @@
+package consulapi
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // AllowStale allows any Consul server (non-leader) to service
+ // a read. This allows for lower latency and higher throughput
+ AllowStale bool
+
+ // RequireConsistent forces the read to be fully consistent.
+ // This is more expensive but prevents ever performing a stale
+ // read.
+ RequireConsistent bool
+
+ // WaitIndex is used to enable a blocking query. Waits
+ // until the timeout or the next index is reached
+ WaitIndex uint64
+
+ // WaitTime is used to bound the duration of a wait.
+ // Defaults to that of the Config, but can be overridden.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+ // LastIndex. This can be used as a WaitIndex to perform
+ // a blocking query
+ LastIndex uint64
+
+ // Time of last contact from the leader for the
+ // server servicing the request
+ LastContact time.Duration
+
+ // Is there a known leader
+ KnownLeader bool
+
+ // How long did the request take
+ RequestTime time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+ // How long did the request take
+ RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+ // Username to use for HTTP Basic Authentication
+ Username string
+
+ // Password to use for HTTP Basic Authentication
+ Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+ // Address is the address of the Consul server
+ Address string
+
+ // Scheme is the URI scheme for the Consul server
+ Scheme string
+
+ // Datacenter to use. If not provided, the default agent datacenter is used.
+ Datacenter string
+
+ // HttpClient is the client to use. Default will be
+ // used if not provided.
+ HttpClient *http.Client
+
+ // HttpAuth is the auth info to use for http access.
+ HttpAuth *HttpBasicAuth
+
+ // WaitTime limits how long a Watch will block. If not provided,
+ // the agent default values will be used.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+}
+
+// DefaultConfig returns a default configuration for the client
+func DefaultConfig() *Config {
+ return &Config{
+ Address: "127.0.0.1:8500",
+ Scheme: "http",
+ HttpClient: http.DefaultClient,
+ }
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+ config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+ // bootstrap the config
+ defConfig := DefaultConfig()
+
+ if len(config.Address) == 0 {
+ config.Address = defConfig.Address
+ }
+
+ if len(config.Scheme) == 0 {
+ config.Scheme = defConfig.Scheme
+ }
+
+ if config.HttpClient == nil {
+ config.HttpClient = defConfig.HttpClient
+ }
+
+ client := &Client{
+ config: *config,
+ }
+ return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+ config *Config
+ method string
+ url *url.URL
+ params url.Values
+ body io.Reader
+ obj interface{}
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+ if q == nil {
+ return
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.AllowStale {
+ r.params.Set("stale", "")
+ }
+ if q.RequireConsistent {
+ r.params.Set("consistent", "")
+ }
+ if q.WaitIndex != 0 {
+ r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+ }
+ if q.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(q.WaitTime))
+ }
+ if q.Token != "" {
+ r.params.Set("token", q.Token)
+ }
+}
+
+// durToMsec converts a duration to a millisecond specified string
+func durToMsec(dur time.Duration) string {
+ return fmt.Sprintf("%dms", dur/time.Millisecond)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+ if q == nil {
+ return
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.Token != "" {
+ r.params.Set("token", q.Token)
+ }
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+ // Encode the query parameters
+ r.url.RawQuery = r.params.Encode()
+
+ // Get the URL string
+ urlRaw := r.url.String()
+
+ // Check if we should encode the body
+ if r.body == nil && r.obj != nil {
+ if b, err := encodeBody(r.obj); err != nil {
+ return nil, err
+ } else {
+ r.body = b
+ }
+ }
+
+ // Create the HTTP request
+ req, err := http.NewRequest(r.method, urlRaw, r.body)
+
+ // Setup auth
+ if err == nil && r.config.HttpAuth != nil {
+ req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+ }
+
+ return req, err
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+ r := &request{
+ config: &c.config,
+ method: method,
+ url: &url.URL{
+ Scheme: c.config.Scheme,
+ Host: c.config.Address,
+ Path: path,
+ },
+ params: make(map[string][]string),
+ }
+ if c.config.Datacenter != "" {
+ r.params.Set("dc", c.config.Datacenter)
+ }
+ if c.config.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(r.config.WaitTime))
+ }
+ if c.config.Token != "" {
+ r.params.Set("token", r.config.Token)
+ }
+ return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+ req, err := r.toHTTP()
+ if err != nil {
+ return 0, nil, err
+ }
+ start := time.Now()
+ resp, err := c.config.HttpClient.Do(req)
+ diff := time.Now().Sub(start)
+ return diff, resp, err
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+ header := resp.Header
+
+ // Parse the X-Consul-Index
+ index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+ }
+ q.LastIndex = index
+
+ // Parse the X-Consul-LastContact
+ last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+ }
+ q.LastContact = time.Duration(last) * time.Millisecond
+
+ // Parse the X-Consul-KnownLeader
+ switch header.Get("X-Consul-KnownLeader") {
+ case "true":
+ q.KnownLeader = true
+ default:
+ q.KnownLeader = false
+ }
+ return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+ dec := json.NewDecoder(resp.Body)
+ return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+ buf := bytes.NewBuffer(nil)
+ enc := json.NewEncoder(buf)
+ if err := enc.Encode(obj); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+ if e != nil {
+ return d, resp, e
+ }
+ if resp.StatusCode != 200 {
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+ return d, resp, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+ }
+ return d, resp, e
+}
diff --git a/vendor/github.com/armon/consul-api/api_test.go b/vendor/github.com/armon/consul-api/api_test.go
new file mode 100644
index 0000000000..3a608c539b
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/api_test.go
@@ -0,0 +1,126 @@
+package consulapi
+
+import (
+ crand "crypto/rand"
+ "fmt"
+ "net/http"
+ "testing"
+ "time"
+)
+
+func makeClient(t *testing.T) *Client {
+ conf := DefaultConfig()
+ client, err := NewClient(conf)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ return client
+}
+
+func testKey() string {
+ buf := make([]byte, 16)
+ if _, err := crand.Read(buf); err != nil {
+ panic(fmt.Errorf("Failed to read random bytes: %v", err))
+ }
+
+ return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16])
+}
+
+func TestSetQueryOptions(t *testing.T) {
+ c := makeClient(t)
+ r := c.newRequest("GET", "/v1/kv/foo")
+ q := &QueryOptions{
+ Datacenter: "foo",
+ AllowStale: true,
+ RequireConsistent: true,
+ WaitIndex: 1000,
+ WaitTime: 100 * time.Second,
+ Token: "12345",
+ }
+ r.setQueryOptions(q)
+
+ if r.params.Get("dc") != "foo" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if _, ok := r.params["stale"]; !ok {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if _, ok := r.params["consistent"]; !ok {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("index") != "1000" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("wait") != "100000ms" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("token") != "12345" {
+ t.Fatalf("bad: %v", r.params)
+ }
+}
+
+func TestSetWriteOptions(t *testing.T) {
+ c := makeClient(t)
+ r := c.newRequest("GET", "/v1/kv/foo")
+ q := &WriteOptions{
+ Datacenter: "foo",
+ Token: "23456",
+ }
+ r.setWriteOptions(q)
+
+ if r.params.Get("dc") != "foo" {
+ t.Fatalf("bad: %v", r.params)
+ }
+ if r.params.Get("token") != "23456" {
+ t.Fatalf("bad: %v", r.params)
+ }
+}
+
+func TestRequestToHTTP(t *testing.T) {
+ c := makeClient(t)
+ r := c.newRequest("DELETE", "/v1/kv/foo")
+ q := &QueryOptions{
+ Datacenter: "foo",
+ }
+ r.setQueryOptions(q)
+ req, err := r.toHTTP()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if req.Method != "DELETE" {
+ t.Fatalf("bad: %v", req)
+ }
+ if req.URL.String() != "http://127.0.0.1:8500/v1/kv/foo?dc=foo" {
+ t.Fatalf("bad: %v", req)
+ }
+}
+
+func TestParseQueryMeta(t *testing.T) {
+ resp := &http.Response{
+ Header: make(map[string][]string),
+ }
+ resp.Header.Set("X-Consul-Index", "12345")
+ resp.Header.Set("X-Consul-LastContact", "80")
+ resp.Header.Set("X-Consul-KnownLeader", "true")
+
+ qm := &QueryMeta{}
+ if err := parseQueryMeta(resp, qm); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex != 12345 {
+ t.Fatalf("Bad: %v", qm)
+ }
+ if qm.LastContact != 80*time.Millisecond {
+ t.Fatalf("Bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("Bad: %v", qm)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/catalog.go b/vendor/github.com/armon/consul-api/catalog.go
new file mode 100644
index 0000000000..8080e2a910
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/catalog.go
@@ -0,0 +1,181 @@
+package consulapi
+
+type Node struct {
+ Node string
+ Address string
+}
+
+type CatalogService struct {
+ Node string
+ Address string
+ ServiceID string
+ ServiceName string
+ ServiceTags []string
+ ServicePort int
+}
+
+type CatalogNode struct {
+ Node *Node
+ Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+ Node string
+ Address string
+ Datacenter string
+ Service *AgentService
+ Check *AgentCheck
+}
+
+type CatalogDeregistration struct {
+ Node string
+ Address string
+ Datacenter string
+ ServiceID string
+ CheckID string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+ c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+ return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/register")
+ r.setWriteOptions(q)
+ r.obj = reg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+ r.setWriteOptions(q)
+ r.obj = dereg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+ _, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/nodes")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*Node
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/services")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out map[string][]string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+ r.setQueryOptions(q)
+ if tag != "" {
+ r.params.Set("tag", tag)
+ }
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CatalogService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *CatalogNode
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/catalog_test.go b/vendor/github.com/armon/consul-api/catalog_test.go
new file mode 100644
index 0000000000..7ed6cfc2ce
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/catalog_test.go
@@ -0,0 +1,219 @@
+package consulapi
+
+import (
+ "testing"
+)
+
+func TestCatalog_Datacenters(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ datacenters, err := catalog.Datacenters()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(datacenters) == 0 {
+ t.Fatalf("Bad: %v", datacenters)
+ }
+}
+
+func TestCatalog_Nodes(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ nodes, meta, err := catalog.Nodes(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("Bad: %v", meta)
+ }
+
+ if len(nodes) == 0 {
+ t.Fatalf("Bad: %v", nodes)
+ }
+}
+
+func TestCatalog_Services(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ services, meta, err := catalog.Services(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("Bad: %v", meta)
+ }
+
+ if len(services) == 0 {
+ t.Fatalf("Bad: %v", services)
+ }
+}
+
+func TestCatalog_Service(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ services, meta, err := catalog.Service("consul", "", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("Bad: %v", meta)
+ }
+
+ if len(services) == 0 {
+ t.Fatalf("Bad: %v", services)
+ }
+}
+
+func TestCatalog_Node(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ name, _ := c.Agent().NodeName()
+ info, meta, err := catalog.Node(name, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("Bad: %v", meta)
+ }
+ if len(info.Services) == 0 {
+ t.Fatalf("Bad: %v", info)
+ }
+}
+
+func TestCatalog_Registration(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ service := &AgentService{
+ ID: "redis1",
+ Service: "redis",
+ Tags: []string{"master", "v1"},
+ Port: 8000,
+ }
+
+ check := &AgentCheck{
+ Node: "foobar",
+ CheckID: "service:redis1",
+ Name: "Redis health check",
+ Notes: "Script based health check",
+ Status: "passing",
+ ServiceID: "redis1",
+ }
+
+ reg := &CatalogRegistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ Service: service,
+ Check: check,
+ }
+
+ _, err := catalog.Register(reg, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ node, _, err := catalog.Node("foobar", nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if _, ok := node.Services["redis1"]; !ok {
+ t.Fatalf("missing service: redis1")
+ }
+
+ health, _, err := c.Health().Node("foobar", nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if health[0].CheckID != "service:redis1" {
+ t.Fatalf("missing checkid service:redis1")
+ }
+}
+
+func TestCatalog_Deregistration(t *testing.T) {
+ c := makeClient(t)
+ catalog := c.Catalog()
+
+ dereg := &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ ServiceID: "redis1",
+ }
+
+ _, err := catalog.Deregister(dereg, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ node, _, err := catalog.Node("foobar", nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if _, ok := node.Services["redis1"]; ok {
+ t.Fatalf("ServiceID:redis1 is not deregistered")
+ }
+
+ dereg = &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ CheckID: "service:redis1",
+ }
+
+ _, err = catalog.Deregister(dereg, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ health, _, err := c.Health().Node("foobar", nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(health) != 0 {
+ t.Fatalf("CheckID:service:redis1 is not deregistered")
+ }
+
+ dereg = &CatalogDeregistration{
+ Datacenter: "dc1",
+ Node: "foobar",
+ Address: "192.168.10.10",
+ }
+
+ _, err = catalog.Deregister(dereg, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ node, _, err = catalog.Node("foobar", nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if node != nil {
+ t.Fatalf("node is not deregistered: %v", node)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/event.go b/vendor/github.com/armon/consul-api/event.go
new file mode 100644
index 0000000000..59813d40fa
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/event.go
@@ -0,0 +1,104 @@
+package consulapi
+
+import (
+ "bytes"
+ "strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+ c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+ ID string
+ Name string
+ Payload []byte
+ NodeFilter string
+ ServiceFilter string
+ TagFilter string
+ Version int
+ LTime uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+ return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+ r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+ r.setWriteOptions(q)
+ if params.NodeFilter != "" {
+ r.params.Set("node", params.NodeFilter)
+ }
+ if params.ServiceFilter != "" {
+ r.params.Set("service", params.ServiceFilter)
+ }
+ if params.TagFilter != "" {
+ r.params.Set("tag", params.TagFilter)
+ }
+ if params.Payload != nil {
+ r.body = bytes.NewReader(params.Payload)
+ }
+
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out UserEvent
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+ r := e.c.newRequest("GET", "/v1/event/list")
+ r.setQueryOptions(q)
+ if name != "" {
+ r.params.Set("name", name)
+ }
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*UserEvent
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+ lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+ upper := uuid[19:23] + uuid[24:36]
+ lowVal, err := strconv.ParseUint(lower, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + lower)
+ }
+ highVal, err := strconv.ParseUint(upper, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + upper)
+ }
+ return lowVal ^ highVal
+}
diff --git a/vendor/github.com/armon/consul-api/event_test.go b/vendor/github.com/armon/consul-api/event_test.go
new file mode 100644
index 0000000000..f2be010ad9
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/event_test.go
@@ -0,0 +1,37 @@
+package consulapi
+
+import (
+ "testing"
+)
+
+func TestEvent_FireList(t *testing.T) {
+ c := makeClient(t)
+ event := c.Event()
+
+ params := &UserEvent{Name: "foo"}
+ id, meta, err := event.Fire(params, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ events, qm, err := event.List("", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex != event.IDToIndex(id) {
+ t.Fatalf("Bad: %#v", qm)
+ }
+
+ if events[len(events)-1].ID != id {
+ t.Fatalf("bad: %#v", events)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/health.go b/vendor/github.com/armon/consul-api/health.go
new file mode 100644
index 0000000000..574801e29b
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/health.go
@@ -0,0 +1,136 @@
+package consulapi
+
+import (
+ "fmt"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+ Node *Node
+ Service *AgentService
+ Checks []*HealthCheck
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+ c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+ return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/service/"+service)
+ r.setQueryOptions(q)
+ if tag != "" {
+ r.params.Set("tag", tag)
+ }
+ if passingOnly {
+ r.params.Set("passing", "1")
+ }
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*ServiceEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// State is used to retreive all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+ switch state {
+ case "any":
+ case "warning":
+ case "critical":
+ case "passing":
+ case "unknown":
+ default:
+ return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+ }
+ r := h.c.newRequest("GET", "/v1/health/state/"+state)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*HealthCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/health_test.go b/vendor/github.com/armon/consul-api/health_test.go
new file mode 100644
index 0000000000..d2b3da2e99
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/health_test.go
@@ -0,0 +1,98 @@
+package consulapi
+
+import (
+ "testing"
+ "time"
+)
+
+func TestHealth_Node(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+ health := c.Health()
+
+ info, err := agent.Self()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ name := info["Config"]["NodeName"].(string)
+
+ checks, meta, err := health.Node(name, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ t.Fatalf("Bad: %v", checks)
+ }
+}
+
+func TestHealth_Checks(t *testing.T) {
+ c := makeClient(t)
+ agent := c.Agent()
+ health := c.Health()
+
+ // Make a service with a check
+ reg := &AgentServiceRegistration{
+ Name: "foo",
+ Check: &AgentServiceCheck{
+ TTL: "15s",
+ },
+ }
+ if err := agent.ServiceRegister(reg); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer agent.ServiceDeregister("foo")
+
+ // Wait for the register...
+ time.Sleep(20 * time.Millisecond)
+
+ checks, meta, err := health.Checks("foo", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ t.Fatalf("Bad: %v", checks)
+ }
+}
+
+func TestHealth_Service(t *testing.T) {
+ c := makeClient(t)
+ health := c.Health()
+
+ // consul service should always exist...
+ checks, meta, err := health.Service("consul", "", true, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ t.Fatalf("Bad: %v", checks)
+ }
+}
+
+func TestHealth_State(t *testing.T) {
+ c := makeClient(t)
+ health := c.Health()
+
+ checks, meta, err := health.State("any", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.LastIndex == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+ if len(checks) == 0 {
+ t.Fatalf("Bad: %v", checks)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/kv.go b/vendor/github.com/armon/consul-api/kv.go
new file mode 100644
index 0000000000..98c3b1a035
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/kv.go
@@ -0,0 +1,219 @@
+package consulapi
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+ Key string
+ CreateIndex uint64
+ ModifyIndex uint64
+ LockIndex uint64
+ Flags uint64
+ Value []byte
+ Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+ c *Client
+}
+
+// KV is used to return a handle to the K/V apis
+func (c *Client) KV() *KV {
+ return &KV{c}
+}
+
+// Get is used to lookup a single key
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(key, nil, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+ params := map[string]string{"keys": ""}
+ if separator != "" {
+ params["separator"] = separator
+ }
+ resp, qm, err := k.getInternal(prefix, params, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []string
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+ r := k.c.newRequest("GET", "/v1/kv/"+key)
+ r.setQueryOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := k.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == 404 {
+ resp.Body.Close()
+ return nil, qm, nil
+ } else if resp.StatusCode != 200 {
+ resp.Body.Close()
+ return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+ }
+ return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value is respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+ params := make(map[string]string, 1)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ _, wm, err := k.put(p.Key, params, p.Value, q)
+ return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisiiton operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["acquire"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["release"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+ r := k.c.newRequest("PUT", "/v1/kv/"+key)
+ r.setWriteOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ r.body = bytes.NewReader(body)
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(string(buf.Bytes()), "true")
+ return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+ return k.deleteInternal(key, nil, w)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+ return k.deleteInternal(prefix, []string{"recurse"}, w)
+}
+
+func (k *KV) deleteInternal(key string, params []string, q *WriteOptions) (*WriteMeta, error) {
+ r := k.c.newRequest("DELETE", "/v1/kv/"+key)
+ r.setWriteOptions(q)
+ for _, param := range params {
+ r.params.Set(param, "")
+ }
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+ return qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/kv_test.go b/vendor/github.com/armon/consul-api/kv_test.go
new file mode 100644
index 0000000000..2d92d69f62
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/kv_test.go
@@ -0,0 +1,374 @@
+package consulapi
+
+import (
+ "bytes"
+ "path"
+ "testing"
+ "time"
+)
+
+func TestClientPutGetDelete(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Get a get without a key
+ key := testKey()
+ pair, _, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+
+ // Put the key
+ value := []byte("test")
+ p := &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if pair.Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete
+ if _, err := kv.Delete(key, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // Get should fail
+ pair, _, err = kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+}
+
+func TestClient_List_DeleteRecurse(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Generate some test keys
+ prefix := testKey()
+ var keys []string
+ for i := 0; i < 100; i++ {
+ keys = append(keys, path.Join(prefix, testKey()))
+ }
+
+ // Set values
+ value := []byte("test")
+ for _, key := range keys {
+ p := &KVPair{Key: key, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // List the values
+ pairs, meta, err := kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != len(keys) {
+ t.Fatalf("got %d keys", len(pairs))
+ }
+ for _, pair := range pairs {
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete all
+ if _, err := kv.DeleteTree(prefix, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List the values
+ pairs, _, err = kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 0 {
+ t.Fatalf("got %d keys", len(pairs))
+ }
+}
+
+func TestClient_CAS(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Put the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value}
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("CAS failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // CAS update with bad index
+ newVal := []byte("foo")
+ p.Value = newVal
+ p.ModifyIndex = 1
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if work {
+ t.Fatalf("unexpected CAS")
+ }
+
+ // CAS update with valid index
+ p.ModifyIndex = meta.LastIndex
+ if work, _, err := kv.CAS(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("unexpected CAS failure")
+ }
+}
+
+func TestClient_WatchGet(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Get a get without a key
+ key := testKey()
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair != nil {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Put the key
+ value := []byte("test")
+ go func() {
+ c := makeClient(t)
+ kv := c.KV()
+
+ time.Sleep(100 * time.Millisecond)
+ p := &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ // Get should work
+ options := &QueryOptions{WaitIndex: meta.LastIndex}
+ pair, meta2, err := kv.Get(key, options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if !bytes.Equal(pair.Value, value) {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if pair.Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pair)
+ }
+ if meta2.LastIndex <= meta.LastIndex {
+ t.Fatalf("unexpected value: %#v", meta2)
+ }
+}
+
+func TestClient_WatchList(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Get a get without a key
+ prefix := testKey()
+ key := path.Join(prefix, testKey())
+ pairs, meta, err := kv.List(prefix, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 0 {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Put the key
+ value := []byte("test")
+ go func() {
+ c := makeClient(t)
+ kv := c.KV()
+
+ time.Sleep(100 * time.Millisecond)
+ p := &KVPair{Key: key, Flags: 42, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ // Get should work
+ options := &QueryOptions{WaitIndex: meta.LastIndex}
+ pairs, meta2, err := kv.List(prefix, options)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(pairs) != 1 {
+ t.Fatalf("expected value: %#v", pairs)
+ }
+ if !bytes.Equal(pairs[0].Value, value) {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if pairs[0].Flags != 42 {
+ t.Fatalf("unexpected value: %#v", pairs)
+ }
+ if meta2.LastIndex <= meta.LastIndex {
+ t.Fatalf("unexpected value: %#v", meta2)
+ }
+
+}
+
+func TestClient_Keys_DeleteRecurse(t *testing.T) {
+ c := makeClient(t)
+ kv := c.KV()
+
+ // Generate some test keys
+ prefix := testKey()
+ var keys []string
+ for i := 0; i < 100; i++ {
+ keys = append(keys, path.Join(prefix, testKey()))
+ }
+
+ // Set values
+ value := []byte("test")
+ for _, key := range keys {
+ p := &KVPair{Key: key, Value: value}
+ if _, err := kv.Put(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }
+
+ // List the values
+ out, meta, err := kv.Keys(prefix, "", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != len(keys) {
+ t.Fatalf("got %d keys", len(out))
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Delete all
+ if _, err := kv.DeleteTree(prefix, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ // List the values
+ out, _, err = kv.Keys(prefix, "", nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(out) != 0 {
+ t.Fatalf("got %d keys", len(out))
+ }
+}
+
+func TestClient_AcquireRelease(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+ kv := c.KV()
+
+ // Make a session
+ id, _, err := session.CreateNoChecks(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ // Acquire the key
+ key := testKey()
+ value := []byte("test")
+ p := &KVPair{Key: key, Value: value, Session: id}
+ if work, _, err := kv.Acquire(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Lock failure")
+ }
+
+ // Get should work
+ pair, meta, err := kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != id {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+
+ // Release
+ if work, _, err := kv.Release(p, nil); err != nil {
+ t.Fatalf("err: %v", err)
+ } else if !work {
+ t.Fatalf("Release fail")
+ }
+
+ // Get should work
+ pair, meta, err = kv.Get(key, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if pair == nil {
+ t.Fatalf("expected value: %#v", pair)
+ }
+ if pair.LockIndex != 1 {
+ t.Fatalf("Expected lock: %v", pair)
+ }
+ if pair.Session != "" {
+ t.Fatalf("Expected unlock: %v", pair)
+ }
+ if meta.LastIndex == 0 {
+ t.Fatalf("unexpected value: %#v", meta)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/session.go b/vendor/github.com/armon/consul-api/session.go
new file mode 100644
index 0000000000..4fbfc5ee9a
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/session.go
@@ -0,0 +1,204 @@
+package consulapi
+
+import (
+ "time"
+)
+
+// SessionEntry represents a session in consul
+type SessionEntry struct {
+ CreateIndex uint64
+ ID string
+ Name string
+ Node string
+ Checks []string
+ LockDelay time.Duration
+ Behavior string
+ TTL string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+ c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+ return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ body := make(map[string]interface{})
+ body["Checks"] = []string{}
+ if se != nil {
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(body, q)
+
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ var obj interface{}
+ if se != nil {
+ body := make(map[string]interface{})
+ obj = body
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if len(se.Checks) > 0 {
+ body["Checks"] = se.Checks
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+ r := s.c.newRequest("PUT", "/v1/session/create")
+ r.setWriteOptions(q)
+ r.obj = obj
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Destroy invalides a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ r := s.c.newRequest("PUT", "/v1/session/destroy/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+ r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+
+ var entries []*SessionEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, wm, err
+ }
+
+ if len(entries) > 0 {
+ return entries[0], wm, nil
+ }
+ return nil, wm, nil
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+ r := s.c.newRequest("GET", "/v1/session/info/"+id)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*SessionEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ r := s.c.newRequest("GET", "/v1/session/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*SessionEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ r := s.c.newRequest("GET", "/v1/session/list")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*SessionEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
diff --git a/vendor/github.com/armon/consul-api/session_test.go b/vendor/github.com/armon/consul-api/session_test.go
new file mode 100644
index 0000000000..9351c999ef
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/session_test.go
@@ -0,0 +1,190 @@
+package consulapi
+
+import (
+ "testing"
+)
+
+func TestSession_CreateDestroy(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+
+ id, meta, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ meta, err = session.Destroy(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+}
+
+func TestSession_CreateRenewDestroy(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+
+ se := &SessionEntry{
+ TTL: "10s",
+ }
+
+ id, meta, err := session.Create(se, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if id == "" {
+ t.Fatalf("invalid: %v", id)
+ }
+
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ renew, meta, err := session.Renew(id, nil)
+
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if meta.RequestTime == 0 {
+ t.Fatalf("bad: %v", meta)
+ }
+
+ if renew == nil {
+ t.Fatalf("should get session")
+ }
+
+ if renew.ID != id {
+ t.Fatalf("should have matching id")
+ }
+
+ if renew.TTL != "10s" {
+ t.Fatalf("should get session with TTL")
+ }
+}
+
+func TestSession_Info(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+
+ if info == nil {
+ t.Fatalf("should get session")
+ }
+ if info.CreateIndex == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.ID != id {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Name != "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Node == "" {
+ t.Fatalf("bad: %v", info)
+ }
+ if len(info.Checks) == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.LockDelay == 0 {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.Behavior != "release" {
+ t.Fatalf("bad: %v", info)
+ }
+ if info.TTL != "" {
+ t.Fatalf("bad: %v", info)
+ }
+}
+
+func TestSession_Node(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ info, qm, err := session.Info(id, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ sessions, qm, err := session.Node(info.Node, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
+
+func TestSession_List(t *testing.T) {
+ c := makeClient(t)
+ session := c.Session()
+
+ id, _, err := session.Create(nil, nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ defer session.Destroy(id, nil)
+
+ sessions, qm, err := session.List(nil)
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ if len(sessions) != 1 {
+ t.Fatalf("bad: %v", sessions)
+ }
+
+ if qm.LastIndex == 0 {
+ t.Fatalf("bad: %v", qm)
+ }
+ if !qm.KnownLeader {
+ t.Fatalf("bad: %v", qm)
+ }
+}
diff --git a/vendor/github.com/armon/consul-api/status.go b/vendor/github.com/armon/consul-api/status.go
new file mode 100644
index 0000000000..21c31982f4
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/status.go
@@ -0,0 +1,43 @@
+package consulapi
+
+// Status can be used to query the Status endpoints
+type Status struct {
+ c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+ return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+ r := s.c.newRequest("GET", "/v1/status/leader")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var leader string
+ if err := decodeBody(resp, &leader); err != nil {
+ return "", err
+ }
+ return leader, nil
+}
+
+// Peers is used to query for known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
diff --git a/vendor/github.com/armon/consul-api/status_test.go b/vendor/github.com/armon/consul-api/status_test.go
new file mode 100644
index 0000000000..ab9b42f503
--- /dev/null
+++ b/vendor/github.com/armon/consul-api/status_test.go
@@ -0,0 +1,31 @@
+package consulapi
+
+import (
+ "testing"
+)
+
+func TestStatusLeader(t *testing.T) {
+ c := makeClient(t)
+ status := c.Status()
+
+ leader, err := status.Leader()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if leader == "" {
+ t.Fatalf("Expected leader")
+ }
+}
+
+func TestStatusPeers(t *testing.T) {
+ c := makeClient(t)
+ status := c.Status()
+
+ peers, err := status.Peers()
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if len(peers) == 0 {
+ t.Fatalf("Expected peers ")
+ }
+}
diff --git a/vendor/github.com/coreos/go-etcd/.gitignore b/vendor/github.com/coreos/go-etcd/.gitignore
new file mode 100644
index 0000000000..d344ba6b06
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/.gitignore
@@ -0,0 +1 @@
+config.json
diff --git a/vendor/github.com/coreos/go-etcd/LICENSE b/vendor/github.com/coreos/go-etcd/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/coreos/go-etcd/README.md b/vendor/github.com/coreos/go-etcd/README.md
new file mode 100644
index 0000000000..4cd43d1bff
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/README.md
@@ -0,0 +1,58 @@
+# go-etcd
+
+[![GoDoc](https://godoc.org/github.com/coreos/go-etcd/etcd?status.png)](https://godoc.org/github.com/coreos/go-etcd/etcd)
+
+# DEPRECATED
+
+etcd now has an [official Go client](https://github.com/coreos/etcd/tree/master/client), which has
+a nicer API and better support.
+
+We strongly suggest you use the official Go client instead of go-etcd in your new projects.
+For existing projects, we suggest you migrate to the official Go client.
+
+## Usage
+
+The current version of go-etcd supports etcd v2.0+, if you need support for etcd v0.4 please use go-etcd from the [release-0.4](https://github.com/coreos/go-etcd/tree/release-0.4) branch.
+
+```
+package main
+
+import (
+ "log"
+
+ "github.com/coreos/go-etcd/etcd"
+)
+
+func main() {
+ machines := []string{"http://127.0.0.1:2379"}
+ client := etcd.NewClient(machines)
+
+ if _, err := client.Set("/foo", "bar", 0); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+## Install
+
+```bash
+go get github.com/coreos/go-etcd/etcd
+```
+
+## Caveat
+
+1. go-etcd always talks to one member if the member works well. This saves socket resources, and improves efficiency for both client and server side. It doesn't hurt the consistent view of the client because each etcd member has data replication.
+
+2. go-etcd does round-robin rotation when it fails to connect to the member in use. For example, if the member that go-etcd connects to is hard killed, go-etcd will fail on the first attempt with the killed member, and succeed on the second attempt with another member. The default CheckRetry function does 2*machine_number retries before returning an error.
+
+3. The default transport in go-etcd sets 1s DialTimeout and 1s TCP keepalive period. A customized transport could be set by calling `Client.SetTransport`.
+
+4. Default go-etcd cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. We will improve it, but it is not in high priority because we don't see a solid real-life case which server is stopped but connection is alive.
+
+5. go-etcd is not thread-safe, and it may have races when switching members or updating the cluster.
+
+6. go-etcd cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, go-etcd may retrieve outdated data. We will improve this.
+
+## License
+
+See LICENSE file.
diff --git a/vendor/github.com/coreos/go-etcd/etcd/add_child.go b/vendor/github.com/coreos/go-etcd/etcd/add_child.go
new file mode 100644
index 0000000000..7122be049e
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/add_child.go
@@ -0,0 +1,23 @@
+package etcd
+
+// Add a new directory with a random etcd-generated key under the given path.
+func (c *Client) AddChildDir(key string, ttl uint64) (*Response, error) {
+ raw, err := c.post(key, "", ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
+
+// Add a new file with a random etcd-generated key under the given path.
+func (c *Client) AddChild(key string, value string, ttl uint64) (*Response, error) {
+ raw, err := c.post(key, value, ttl)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return raw.Unmarshal()
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/add_child_test.go b/vendor/github.com/coreos/go-etcd/etcd/add_child_test.go
new file mode 100644
index 0000000000..26223ff1c8
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/add_child_test.go
@@ -0,0 +1,73 @@
+package etcd
+
+import "testing"
+
+func TestAddChild(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChild("fooDir", "v0", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChild("fooDir", "v1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// The child with v0 should precede the child with v1 because it's added
+	// earlier, so it should have a lower key.
+	if !(len(resp.Node.Nodes) == 2 && (resp.Node.Nodes[0].Value == "v0" && resp.Node.Nodes[1].Value == "v1")) {
+		t.Fatalf("AddChild 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
+			" The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChild("nonexistentDir", "foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestAddChildDir(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+		c.Delete("nonexistentDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+
+	_, err := c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.AddChildDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resp, err := c.Get("fooDir", true, false)
+	// Both children should be empty directories; the one added first
+	// precedes the other because it has a lower key.
+	if !(len(resp.Node.Nodes) == 2 && (len(resp.Node.Nodes[0].Nodes) == 0 && len(resp.Node.Nodes[1].Nodes) == 0)) {
+		t.Fatalf("AddChildDir 1 failed. There should be two chlidren whose values are v0 and v1, respectively."+
+			" The response was: %#v", resp)
+	}
+
+	// Creating a child under a nonexistent directory should succeed.
+	// The directory should be created.
+	resp, err = c.AddChildDir("nonexistentDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/client.go b/vendor/github.com/coreos/go-etcd/etcd/client.go
new file mode 100644
index 0000000000..60ed762b99
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/client.go
@@ -0,0 +1,476 @@
+package etcd
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+)
+
+// See SetConsistency for how to use these constants.
+const (
+ // Using strings rather than iota because the consistency level
+ // could be persisted to disk, so it'd be better to use
+ // human-readable values.
+ STRONG_CONSISTENCY = "STRONG"
+ WEAK_CONSISTENCY = "WEAK"
+)
+
+const (
+ defaultBufferSize = 10
+)
+
+func init() {
+ rand.Seed(int64(time.Now().Nanosecond()))
+}
+
+type Config struct {
+ CertFile string `json:"certFile"`
+ KeyFile string `json:"keyFile"`
+ CaCertFile []string `json:"caCertFiles"`
+ DialTimeout time.Duration `json:"timeout"`
+ Consistency string `json:"consistency"`
+}
+
+type credentials struct {
+ username string
+ password string
+}
+
+type Client struct {
+ config Config `json:"config"`
+ cluster *Cluster `json:"cluster"`
+ httpClient *http.Client
+ credentials *credentials
+ transport *http.Transport
+ persistence io.Writer
+ cURLch chan string
+ // CheckRetry can be used to control the policy for failed requests
+ // and modify the cluster if needed.
+ // The client calls it before sending requests again, and
+ // stops retrying if CheckRetry returns some error. The cases that
+ // this function needs to handle include no response and unexpected
+ // http status code of response.
+ // If CheckRetry is nil, client will call the default one
+ // `DefaultCheckRetry`.
+ // Argument cluster is the etcd.Cluster object that these requests have been made on.
+ // Argument numReqs is the number of http.Requests that have been made so far.
+ // Argument lastResp is the http.Responses from the last request.
+ // Argument err is the reason of the failure.
+ CheckRetry func(cluster *Cluster, numReqs int,
+ lastResp http.Response, err error) error
+}
+
+// NewClient create a basic client that is configured to be used
+// with the given machine list.
+func NewClient(machines []string) *Client {
+ config := Config{
+ // default timeout is one second
+ DialTimeout: time.Second,
+ Consistency: WEAK_CONSISTENCY,
+ }
+
+ client := &Client{
+ cluster: NewCluster(machines),
+ config: config,
+ }
+
+ client.initHTTPClient()
+ client.saveConfig()
+
+ return client
+}
+
+// NewTLSClient create a basic client with TLS configuration
+func NewTLSClient(machines []string, cert, key, caCert string) (*Client, error) {
+	// overwrite the default machine to use https
+	if len(machines) == 0 {
+		machines = []string{"https://127.0.0.1:4001"}
+	}
+
+	config := Config{
+		// default timeout is one second
+		DialTimeout: time.Second,
+		Consistency: WEAK_CONSISTENCY,
+		CertFile:    cert,
+		KeyFile:     key,
+		CaCertFile:  make([]string, 0),
+	}
+
+	client := &Client{
+		cluster: NewCluster(machines),
+		config:  config,
+	}
+
+	err := client.initHTTPSClient(cert, key)
+	if err != nil {
+		return nil, err
+	}
+
+	err = client.AddRootCA(caCert) // NOTE(review): err is discarded — an unreadable caCert is silently ignored and the function returns nil error; confirm intended
+
+	client.saveConfig()
+
+	return client, nil
+}
+
+// NewClientFromFile creates a client from a given file path.
+// The given file is expected to use the JSON format.
+func NewClientFromFile(fpath string) (*Client, error) {
+ fi, err := os.Open(fpath)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := fi.Close(); err != nil {
+ panic(err)
+ }
+ }()
+
+ return NewClientFromReader(fi)
+}
+
+// NewClientFromReader creates a Client configured from a given reader.
+// The configuration is expected to use the JSON format.
+func NewClientFromReader(reader io.Reader) (*Client, error) {
+ c := new(Client)
+
+ b, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+
+ err = json.Unmarshal(b, c)
+ if err != nil {
+ return nil, err
+ }
+ if c.config.CertFile == "" {
+ c.initHTTPClient()
+ } else {
+ err = c.initHTTPSClient(c.config.CertFile, c.config.KeyFile)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ for _, caCert := range c.config.CaCertFile {
+ if err := c.AddRootCA(caCert); err != nil {
+ return nil, err
+ }
+ }
+
+ return c, nil
+}
+
+// Override the Client's HTTP Transport object
+func (c *Client) SetTransport(tr *http.Transport) {
+ c.httpClient.Transport = tr
+ c.transport = tr
+}
+
+func (c *Client) SetCredentials(username, password string) {
+ c.credentials = &credentials{username, password}
+}
+
+func (c *Client) Close() {
+ c.transport.DisableKeepAlives = true
+ c.transport.CloseIdleConnections()
+}
+
+// initHTTPClient initializes a HTTP client for etcd client
+func (c *Client) initHTTPClient() {
+ c.transport = &http.Transport{
+ Dial: c.DefaultDial,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+ c.httpClient = &http.Client{Transport: c.transport}
+}
+
+// initHTTPClient initializes a HTTPS client for etcd client
+func (c *Client) initHTTPSClient(cert, key string) error {
+ if cert == "" || key == "" {
+ return errors.New("Require both cert and key path")
+ }
+
+ tlsCert, err := tls.LoadX509KeyPair(cert, key)
+ if err != nil {
+ return err
+ }
+
+ tlsConfig := &tls.Config{
+ Certificates: []tls.Certificate{tlsCert},
+ InsecureSkipVerify: true,
+ }
+
+ c.transport = &http.Transport{
+ TLSClientConfig: tlsConfig,
+ Dial: c.DefaultDial,
+ }
+
+ c.httpClient = &http.Client{Transport: c.transport}
+ return nil
+}
+
+// SetPersistence sets a writer to which the config will be
+// written every time it's changed.
+func (c *Client) SetPersistence(writer io.Writer) {
+ c.persistence = writer
+}
+
+// SetConsistency changes the consistency level of the client.
+//
+// When consistency is set to STRONG_CONSISTENCY, all requests,
+// including GET, are sent to the leader. This means that, assuming
+// the absence of leader failures, GET requests are guaranteed to see
+// the changes made by previous requests.
+//
+// When consistency is set to WEAK_CONSISTENCY, other requests
+// are still sent to the leader, but GET requests are sent to a
+// random server from the server pool. This reduces the read
+// load on the leader, but it's not guaranteed that the GET requests
+// will see changes made by previous requests (they might have not
+// yet been committed on non-leader servers).
+func (c *Client) SetConsistency(consistency string) error {
+ if !(consistency == STRONG_CONSISTENCY || consistency == WEAK_CONSISTENCY) {
+ return errors.New("The argument must be either STRONG_CONSISTENCY or WEAK_CONSISTENCY.")
+ }
+ c.config.Consistency = consistency
+ return nil
+}
+
+// Sets the DialTimeout value
+func (c *Client) SetDialTimeout(d time.Duration) {
+ c.config.DialTimeout = d
+}
+
+// AddRootCA adds a root CA cert for the etcd client
+func (c *Client) AddRootCA(caCert string) error {
+ if c.httpClient == nil {
+ return errors.New("Client has not been initialized yet!")
+ }
+
+ certBytes, err := ioutil.ReadFile(caCert)
+ if err != nil {
+ return err
+ }
+
+ tr, ok := c.httpClient.Transport.(*http.Transport)
+
+ if !ok {
+ panic("AddRootCA(): Transport type assert should not fail")
+ }
+
+ if tr.TLSClientConfig.RootCAs == nil {
+ caCertPool := x509.NewCertPool()
+ ok = caCertPool.AppendCertsFromPEM(certBytes)
+ if ok {
+ tr.TLSClientConfig.RootCAs = caCertPool
+ }
+ tr.TLSClientConfig.InsecureSkipVerify = false
+ } else {
+ ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
+ }
+
+ if !ok {
+ err = errors.New("Unable to load caCert")
+ }
+
+ c.config.CaCertFile = append(c.config.CaCertFile, caCert)
+ c.saveConfig()
+
+ return err
+}
+
+// SetCluster updates cluster information using the given machine list.
+func (c *Client) SetCluster(machines []string) bool {
+ success := c.internalSyncCluster(machines)
+ return success
+}
+
+func (c *Client) GetCluster() []string {
+ return c.cluster.Machines
+}
+
+// SyncCluster updates the cluster information using the internal machine list.
+// If no members are found, the internal machine list is left untouched.
+func (c *Client) SyncCluster() bool {
+	return c.internalSyncCluster(c.cluster.Machines)
+}
+
+// internalSyncCluster syncs cluster information using the given machine list.
+func (c *Client) internalSyncCluster(machines []string) bool {
+	// comma-separated list of machines in the cluster.
+	members := ""
+
+	for _, machine := range machines {
+		httpPath := c.createHttpPath(machine, path.Join(version, "members"))
+		resp, err := c.httpClient.Get(httpPath)
+		if err != nil {
+			// try another machine in the cluster
+			continue
+		}
+
+		if resp.StatusCode != http.StatusOK { // fall-back to old endpoint
+			httpPath := c.createHttpPath(machine, path.Join(version, "machines")) // NOTE(review): outer resp.Body from the /members request is never closed on this path — possible connection leak
+			resp, err := c.httpClient.Get(httpPath)
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+			members = string(b)
+		} else {
+			b, err := ioutil.ReadAll(resp.Body)
+			resp.Body.Close()
+			if err != nil {
+				// try another machine in the cluster
+				continue
+			}
+
+			var mCollection memberCollection
+			if err := json.Unmarshal(b, &mCollection); err != nil {
+				// try another machine
+				continue
+			}
+
+			urls := make([]string, 0)
+			for _, m := range mCollection {
+				urls = append(urls, m.ClientURLs...)
+			}
+
+			members = strings.Join(urls, ",")
+		}
+
+		// We should never do an empty cluster update.
+		if members == "" {
+			continue
+		}
+
+		// update Machines List
+		c.cluster.updateFromStr(members)
+		logger.Debug("sync.machines ", c.cluster.Machines)
+		c.saveConfig()
+		return true
+	}
+
+	return false
+}
+
+// createHttpPath creates a complete HTTP URL.
+// serverName should contain both the host name and a port number, if any.
+func (c *Client) createHttpPath(serverName string, _path string) string {
+ u, err := url.Parse(serverName)
+ if err != nil {
+ panic(err)
+ }
+
+ u.Path = path.Join(u.Path, _path)
+
+ if u.Scheme == "" {
+ u.Scheme = "http"
+ }
+ return u.String()
+}
+
+// DefaultDial attempts to open a TCP connection to the provided address, explicitly
+// enabling keep-alives with a one-second interval.
+func (c *Client) DefaultDial(network, addr string) (net.Conn, error) {
+ dialer := net.Dialer{
+ Timeout: c.config.DialTimeout,
+ KeepAlive: time.Second,
+ }
+
+ return dialer.Dial(network, addr)
+}
+
+func (c *Client) OpenCURL() {
+ c.cURLch = make(chan string, defaultBufferSize)
+}
+
+func (c *Client) CloseCURL() {
+ c.cURLch = nil
+}
+
+func (c *Client) sendCURL(command string) {
+ go func() {
+ select {
+ case c.cURLch <- command:
+ default:
+ }
+ }()
+}
+
+func (c *Client) RecvCURL() string {
+ return <-c.cURLch
+}
+
+// saveConfig saves the current config using c.persistence.
+func (c *Client) saveConfig() error {
+ if c.persistence != nil {
+ b, err := json.Marshal(c)
+ if err != nil {
+ return err
+ }
+
+ _, err = c.persistence.Write(b)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalJSON implements the Marshaller interface
+// as defined by the standard JSON package.
+func (c *Client) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(struct {
+ Config Config `json:"config"`
+ Cluster *Cluster `json:"cluster"`
+ }{
+ Config: c.config,
+ Cluster: c.cluster,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+// UnmarshalJSON implements the Unmarshaller interface
+// as defined by the standard JSON package.
+func (c *Client) UnmarshalJSON(b []byte) error {
+ temp := struct {
+ Config Config `json:"config"`
+ Cluster *Cluster `json:"cluster"`
+ }{}
+ err := json.Unmarshal(b, &temp)
+ if err != nil {
+ return err
+ }
+
+ c.cluster = temp.Cluster
+ c.config = temp.Config
+ return nil
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/client_test.go b/vendor/github.com/coreos/go-etcd/etcd/client_test.go
new file mode 100644
index 0000000000..4720d8d693
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/client_test.go
@@ -0,0 +1,108 @@
+package etcd
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "testing"
+)
+
+// TestSync is an integration test: it requires a three-member etcd
+// cluster listening on localhost:4001-4003. It verifies that SyncCluster
+// discovers machines with http URLs, that SetCluster rejects unreachable
+// addresses, and that it accepts a reachable one.
+func TestSync(t *testing.T) {
+	fmt.Println("Make sure there are three nodes at 0.0.0.0:4001-4003")
+
+	// Explicit trailing slash to ensure this doesn't reproduce:
+	// https://github.com/coreos/go-etcd/issues/82
+	c := NewClient([]string{"http://127.0.0.1:4001/"})
+
+	success := c.SyncCluster()
+	if !success {
+		t.Fatal("cannot sync machines")
+	}
+
+	for _, m := range c.GetCluster() {
+		u, err := url.Parse(m)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if u.Scheme != "http" {
+			t.Fatal("scheme must be http")
+		}
+
+		host, _, err := net.SplitHostPort(u.Host)
+		if err != nil {
+			t.Fatal(err)
+		}
+		// NOTE(review): assumes the cluster advertises itself as
+		// "localhost" rather than 127.0.0.1 — confirm against the test
+		// cluster's configuration.
+		if host != "localhost" {
+			t.Fatal("Host must be localhost")
+		}
+	}
+
+	badMachines := []string{"abc", "edef"}
+
+	success = c.SetCluster(badMachines)
+
+	if success {
+		t.Fatal("should not sync on bad machines")
+	}
+
+	goodMachines := []string{"127.0.0.1:4002"}
+
+	success = c.SetCluster(goodMachines)
+
+	if !success {
+		t.Fatal("cannot sync machines")
+	} else {
+		fmt.Println(c.cluster.Machines)
+	}
+
+}
+
+// TestPersistence round-trips client configuration through a file:
+// saveConfig writes JSON via the persistence writer, and
+// NewClientFromFile must reproduce a client with an identical config.
+// Requires a reachable etcd for SyncCluster; writes ./config.json.
+func TestPersistence(t *testing.T) {
+	c := NewClient(nil)
+	c.SyncCluster()
+
+	fo, err := os.Create("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := fo.Close(); err != nil {
+			panic(err)
+		}
+	}()
+
+	c.SetPersistence(fo)
+	err = c.saveConfig()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	c2, err := NewClientFromFile("config.json")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Verify that the two clients have the same config
+	b1, _ := json.Marshal(c)
+	b2, _ := json.Marshal(c2)
+
+	if string(b1) != string(b2) {
+		t.Fatalf("The two configs should be equal!")
+	}
+}
+
+// TestClientRetry verifies that requests fail over from an unreachable
+// endpoint ("http://strange") to a healthy one. Requires etcd on
+// 127.0.0.1:4001.
+func TestClientRetry(t *testing.T) {
+	c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"})
+	// use first endpoint as the picked url
+	c.cluster.picked = 0
+	if _, err := c.Set("foo", "bar", 5); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := c.Delete("foo", true); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/cluster.go b/vendor/github.com/coreos/go-etcd/etcd/cluster.go
new file mode 100644
index 0000000000..d0461e17a2
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/cluster.go
@@ -0,0 +1,54 @@
+package etcd
+
+import (
+ "math/rand"
+ "strings"
+ "sync"
+)
+
+// Cluster tracks the set of etcd machines known to a client and which
+// machine requests are currently routed to.
+type Cluster struct {
+	// Leader is the last known leader URL (may be empty).
+	Leader string `json:"leader"`
+	// Machines lists candidate machine URLs.
+	Machines []string `json:"machines"`
+	// picked indexes Machines; advanced by failure(), reset on update.
+	picked int
+	// mu guards picked and Machines.
+	mu sync.RWMutex
+}
+
+// NewCluster builds a Cluster from the given machine addresses. With no
+// machines supplied it falls back to the local default endpoint,
+// http://127.0.0.1:4001. The list is shuffled and a random starting
+// machine is picked so load spreads across the cluster.
+func NewCluster(machines []string) *Cluster {
+	if len(machines) == 0 {
+		machines = []string{"http://127.0.0.1:4001"}
+	}
+
+	shuffled := shuffleStringSlice(machines)
+	logger.Debug("Shuffle cluster machines", shuffled)
+
+	return &Cluster{
+		Leader:   "",
+		Machines: shuffled,
+		picked:   rand.Intn(len(shuffled)),
+	}
+}
+
+// failure rotates picked to the next machine in round-robin order after
+// a request to the current machine has failed.
+func (cl *Cluster) failure() {
+	cl.mu.Lock()
+	defer cl.mu.Unlock()
+	cl.picked = (cl.picked + 1) % len(cl.Machines)
+}
+
+// pick returns the machine address currently selected for requests.
+// It only reads shared state, so it takes the read side of the RWMutex
+// (the original took the exclusive lock, needlessly serializing readers);
+// writers (failure, updateFromStr) still take the write lock.
+func (cl *Cluster) pick() string {
+	cl.mu.RLock()
+	defer cl.mu.RUnlock()
+	return cl.Machines[cl.picked]
+}
+
+// updateFromStr replaces the machine list from a comma-separated string,
+// trimming whitespace around each address, then reshuffles the list and
+// re-picks a random starting machine.
+func (cl *Cluster) updateFromStr(machines string) {
+	cl.mu.Lock()
+	defer cl.mu.Unlock()
+
+	parts := strings.Split(machines, ",")
+	for i, m := range parts {
+		parts[i] = strings.TrimSpace(m)
+	}
+	cl.Machines = shuffleStringSlice(parts)
+	cl.picked = rand.Intn(len(cl.Machines))
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete.go b/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete.go
new file mode 100644
index 0000000000..11131bb760
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete.go
@@ -0,0 +1,34 @@
+package etcd
+
+import "fmt"
+
+// CompareAndDelete deletes key only when the node's current value
+// matches prevValue and/or its modified index matches prevIndex (zero
+// values mean "don't check"; at least one must be given). Returns the
+// unmarshalled etcd response on success.
+func (c *Client) CompareAndDelete(key string, prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndDelete(key, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// RawCompareAndDelete issues the conditional DELETE and returns the raw
+// (unparsed) response. At least one of prevValue/prevIndex must be
+// non-zero; zero-valued preconditions are omitted from the request.
+func (c *Client) RawCompareAndDelete(key string, prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	opts := Options{}
+	if prevValue != "" {
+		opts["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		opts["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.delete(key, opts)
+	if err != nil {
+		return nil, err
+	}
+	return raw, nil
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go b/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
new file mode 100644
index 0000000000..223e50f291
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/compare_and_delete_test.go
@@ -0,0 +1,46 @@
+package etcd
+
+import (
+ "testing"
+)
+
+// TestCompareAndDelete exercises conditional deletes by prevValue and by
+// prevIndex, both the success and the mismatch paths. Requires a
+// reachable etcd.
+func TestCompareAndDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed with a correct prevValue
+	resp, err := c.CompareAndDelete("foo", "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 1 prevNode failed: %#v", resp)
+	}
+
+	resp, _ = c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevValue
+	_, err = c.CompareAndDelete("foo", "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 2 should have failed. The response is: %#v", resp)
+	}
+
+	// This should succeed because it gives a correct prevIndex
+	resp, err = c.CompareAndDelete("foo", "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Fixed copy-paste in the failure message: this is CompareAndDelete,
+	// not CompareAndSwap.
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndDelete 3 prevNode failed: %#v", resp)
+	}
+
+	c.Set("foo", "bar", 5)
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndDelete("foo", "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndDelete 4 should have failed. The response is: %#v", resp)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap.go b/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap.go
new file mode 100644
index 0000000000..bb4f90643a
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap.go
@@ -0,0 +1,36 @@
+package etcd
+
+import "fmt"
+
+// CompareAndSwap atomically replaces key's value (and ttl) only when the
+// node's current value matches prevValue and/or its modified index
+// matches prevIndex (zero values mean "don't check"; at least one must
+// be given). Returns the unmarshalled etcd response on success.
+func (c *Client) CompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*Response, error) {
+	raw, err := c.RawCompareAndSwap(key, value, ttl, prevValue, prevIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// RawCompareAndSwap issues the conditional PUT and returns the raw
+// (unparsed) response. At least one of prevValue/prevIndex must be
+// non-zero; zero-valued preconditions are omitted from the request.
+func (c *Client) RawCompareAndSwap(key string, value string, ttl uint64,
+	prevValue string, prevIndex uint64) (*RawResponse, error) {
+	if prevValue == "" && prevIndex == 0 {
+		return nil, fmt.Errorf("You must give either prevValue or prevIndex.")
+	}
+
+	opts := Options{}
+	if prevValue != "" {
+		opts["prevValue"] = prevValue
+	}
+	if prevIndex != 0 {
+		opts["prevIndex"] = prevIndex
+	}
+
+	raw, err := c.put(key, value, ttl, opts)
+	if err != nil {
+		return nil, err
+	}
+	return raw, nil
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go b/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
new file mode 100644
index 0000000000..14a1b00f5a
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/compare_and_swap_test.go
@@ -0,0 +1,57 @@
+package etcd
+
+import (
+ "testing"
+)
+
+// TestCompareAndSwap exercises conditional updates by prevValue and by
+// prevIndex, checking Node, PrevNode and both mismatch paths. Requires a
+// reachable etcd.
+func TestCompareAndSwap(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	// This should succeed
+	resp, err := c.CompareAndSwap("foo", "bar2", 5, "bar", 0)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 1 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevValue
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "xxx", 0)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 2 should have failed. The response is: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	resp, err = c.CompareAndSwap("foo", "bar2", 5, "", resp.Node.ModifiedIndex)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Value == "bar2" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Value == "bar" && resp.PrevNode.Key == "/foo" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("CompareAndSwap 3 prevNode failed: %#v", resp)
+	}
+
+	// This should fail because it gives an incorrect prevIndex
+	resp, err = c.CompareAndSwap("foo", "bar3", 5, "", 29817514)
+	if err == nil {
+		t.Fatalf("CompareAndSwap 4 should have failed. The response is: %#v", resp)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/debug.go b/vendor/github.com/coreos/go-etcd/etcd/debug.go
new file mode 100644
index 0000000000..0f777886ba
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/debug.go
@@ -0,0 +1,55 @@
+package etcd
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "strings"
+)
+
+// logger is the package-wide logger; always non-nil after init.
+var logger *etcdLogger
+
+// SetLogger installs l as the destination for this package's debug and
+// warning output.
+func SetLogger(l *log.Logger) {
+	logger = &etcdLogger{l}
+}
+
+// GetLogger returns the standard-library logger currently in use.
+func GetLogger() *log.Logger {
+	return logger.log
+}
+
+// etcdLogger prefixes messages with their severity before delegating to
+// a standard log.Logger.
+type etcdLogger struct {
+	log *log.Logger
+}
+
+// Debug logs args at DEBUG severity.
+func (p *etcdLogger) Debug(args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+// Debugf logs a formatted message at DEBUG severity.
+func (p *etcdLogger) Debugf(f string, args ...interface{}) {
+	msg := "DEBUG: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+// Warning logs args at WARNING severity.
+func (p *etcdLogger) Warning(args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprint(args...)
+	p.log.Println(msg)
+}
+
+// Warningf logs a formatted message at WARNING severity.
+func (p *etcdLogger) Warningf(f string, args ...interface{}) {
+	msg := "WARNING: " + fmt.Sprintf(f, args...)
+	// Append newline if necessary
+	if !strings.HasSuffix(msg, "\n") {
+		msg = msg + "\n"
+	}
+	p.log.Print(msg)
+}
+
+func init() {
+	// Default logger discards all output; consumers opt in to logging by
+	// calling SetLogger with a real destination.
+	SetLogger(log.New(ioutil.Discard, "go-etcd", log.LstdFlags))
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/debug_test.go b/vendor/github.com/coreos/go-etcd/etcd/debug_test.go
new file mode 100644
index 0000000000..97f6d1110b
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/debug_test.go
@@ -0,0 +1,28 @@
+package etcd
+
+import (
+ "testing"
+)
+
+// Foo and Bar are arbitrary struct types used only to exercise the
+// loggers with non-primitive arguments.
+type Foo struct{}
+type Bar struct {
+	one string
+	two int
+}
+
+// Tests that logs don't panic with arbitrary interfaces: ints, strings,
+// and struct pointers are pushed through all four logger methods.
+func TestDebug(t *testing.T) {
+	f := &Foo{}
+	b := &Bar{"asfd", 3}
+	for _, test := range []interface{}{
+		1234,
+		"asdf",
+		f,
+		b,
+	} {
+		logger.Debug(test)
+		logger.Debugf("something, %s", test)
+		logger.Warning(test)
+		logger.Warningf("something, %s", test)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/delete.go b/vendor/github.com/coreos/go-etcd/etcd/delete.go
new file mode 100644
index 0000000000..b37accd7db
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/delete.go
@@ -0,0 +1,40 @@
+package etcd
+
+// Delete deletes the given key.
+//
+// When recursive set to false, if the key points to a
+// directory the method will fail.
+//
+// When recursive set to true, if the key points to a file,
+// the file will be deleted; if the key points to a directory,
+// then everything under the directory (including all child directories)
+// will be deleted.
+//
+// The unmarshalled Response carries the deleted (now empty) node and the
+// previous node under PrevNode.
+func (c *Client) Delete(key string, recursive bool) (*Response, error) {
+	raw, err := c.RawDelete(key, recursive, false)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// DeleteDir deletes an empty directory or a key value pair.
+// Deleting a non-empty directory fails (use Delete with recursive=true
+// instead).
+func (c *Client) DeleteDir(key string) (*Response, error) {
+	raw, err := c.RawDelete(key, false, true)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// RawDelete issues the DELETE with explicit recursive/dir options and
+// returns the raw (unparsed) response.
+func (c *Client) RawDelete(key string, recursive bool, dir bool) (*RawResponse, error) {
+	ops := Options{
+		"recursive": recursive,
+		"dir":       dir,
+	}
+
+	return c.delete(key, ops)
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/delete_test.go b/vendor/github.com/coreos/go-etcd/etcd/delete_test.go
new file mode 100644
index 0000000000..5904971556
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/delete_test.go
@@ -0,0 +1,81 @@
+package etcd
+
+import (
+ "testing"
+)
+
+// TestDelete covers deleting an existing key (checking both the emptied
+// node and PrevNode) and the error returned for a missing key. Requires
+// a reachable etcd.
+func TestDelete(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+	resp, err := c.Delete("foo", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("Delete failed with %s", resp.Node.Value)
+	}
+
+	// Fixed: print the PrevNode value under test; the original printed
+	// resp.Node.Value, which is always empty at this point.
+	if !(resp.PrevNode.Value == "bar") {
+		t.Fatalf("Delete PrevNode failed with %s", resp.PrevNode.Value)
+	}
+
+	resp, err = c.Delete("foo", false)
+	if err == nil {
+		t.Fatalf("Delete should have failed because the key foo did not exist. "+
+			"The response was: %v", resp)
+	}
+}
+
+// TestDeleteAll covers DeleteDir on an empty directory, DeleteDir's
+// refusal to remove a non-empty directory, recursive Delete of a
+// directory tree, and the error for deleting a missing key. Requires a
+// reachable etcd.
+func TestDeleteAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	c.SetDir("foo", 5)
+	// test delete an empty dir
+	resp, err := c.DeleteDir("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 1 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 1 PrevNode failed: %#v", resp)
+	}
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/foo", "bar", 5)
+	_, err = c.DeleteDir("fooDir")
+	if err == nil {
+		t.Fatal("should not able to delete a non-empty dir with deletedir")
+	}
+
+	resp, err = c.Delete("fooDir", true)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Node.Value == "") {
+		t.Fatalf("DeleteAll 2 failed: %#v", resp)
+	}
+
+	if !(resp.PrevNode.Dir == true && resp.PrevNode.Value == "") {
+		t.Fatalf("DeleteAll 2 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Delete("foo", true)
+	if err == nil {
+		t.Fatalf("DeleteAll should have failed because the key foo did not exist. "+
+			"The response was: %v", resp)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/error.go b/vendor/github.com/coreos/go-etcd/etcd/error.go
new file mode 100644
index 0000000000..66dca54b5c
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/error.go
@@ -0,0 +1,49 @@
+package etcd
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+const (
+	// ErrCodeEtcdNotReachable is reported when no machine in the cluster
+	// produced a response.
+	ErrCodeEtcdNotReachable = 501
+	// ErrCodeUnhandledHTTPStatus is reported when etcd answers with a
+	// status code the client does not handle.
+	ErrCodeUnhandledHTTPStatus = 502
+)
+
+var (
+	// errorMap supplies default human-readable messages for client-side
+	// error codes (see newError).
+	errorMap = map[int]string{
+		ErrCodeEtcdNotReachable: "All the given peers are not reachable",
+	}
+)
+
+// EtcdError mirrors the JSON error payload returned by etcd; it is also
+// constructed locally (via newError) for client-side failures.
+type EtcdError struct {
+	ErrorCode int    `json:"errorCode"`
+	Message   string `json:"message"`
+	Cause     string `json:"cause,omitempty"`
+	Index     uint64 `json:"index"`
+}
+
+// Error implements the error interface, rendering "code: message (cause)
+// [index]".
+func (e EtcdError) Error() string {
+	return fmt.Sprintf("%v: %v (%v) [%v]", e.ErrorCode, e.Message, e.Cause, e.Index)
+}
+
+// newError builds a client-side EtcdError, filling Message from the
+// errorMap entry for errorCode (empty for unknown codes).
+func newError(errorCode int, cause string, index uint64) *EtcdError {
+	return &EtcdError{
+		ErrorCode: errorCode,
+		Message:   errorMap[errorCode],
+		Cause:     cause,
+		Index:     index,
+	}
+}
+
+// handleError decodes an etcd error response body into an *EtcdError.
+// If the body is not valid JSON, the unmarshal error itself is returned
+// (after logging a warning) rather than an EtcdError.
+func handleError(b []byte) error {
+	etcdErr := new(EtcdError)
+
+	err := json.Unmarshal(b, etcdErr)
+	if err != nil {
+		logger.Warningf("cannot unmarshal etcd error: %v", err)
+		return err
+	}
+
+	return etcdErr
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/get.go b/vendor/github.com/coreos/go-etcd/etcd/get.go
new file mode 100644
index 0000000000..09fe641c25
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/get.go
@@ -0,0 +1,32 @@
+package etcd
+
+// Get gets the file or directory associated with the given key.
+// If the key points to a directory, files and directories under
+// it will be returned in sorted or unsorted order, depending on
+// the sort flag.
+// If recursive is set to false, contents under child directories
+// will not be returned.
+// If recursive is set to true, all the contents will be returned.
+// Under STRONG_CONSISTENCY the read carries the quorum option (see
+// RawGet).
+func (c *Client) Get(key string, sort, recursive bool) (*Response, error) {
+	raw, err := c.RawGet(key, sort, recursive)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawGet(key string, sort, recursive bool) (*RawResponse, error) {
+ var q bool
+ if c.config.Consistency == STRONG_CONSISTENCY {
+ q = true
+ }
+ ops := Options{
+ "recursive": recursive,
+ "sorted": sort,
+ "quorum": q,
+ }
+
+ return c.get(key, ops)
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/get_test.go b/vendor/github.com/coreos/go-etcd/etcd/get_test.go
new file mode 100644
index 0000000000..279c4e26f8
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/get_test.go
@@ -0,0 +1,131 @@
+package etcd
+
+import (
+ "reflect"
+ "testing"
+)
+
+// cleanNode scrubs Expiration, ModifiedIndex and CreatedIndex of a node,
+// so fixtures can be compared with reflect.DeepEqual regardless of
+// server-assigned indices and expiry times.
+func cleanNode(n *Node) {
+	n.Expiration = nil
+	n.ModifiedIndex = 0
+	n.CreatedIndex = 0
+}
+
+// cleanResult scrubs a result object two levels deep of Expiration,
+// ModifiedIndex and CreatedIndex.
+func cleanResult(result *Response) {
+	// TODO(philips): make this recursive.
+	cleanNode(result.Node)
+	// `for i := range` (not `for i, _ :=`) — the blank identifier was
+	// redundant and flagged by gofmt/golint.
+	for i := range result.Node.Nodes {
+		cleanNode(result.Node.Nodes[i])
+		for j := range result.Node.Nodes[i].Nodes {
+			cleanNode(result.Node.Nodes[i].Nodes[j])
+		}
+	}
+}
+
+// TestGet checks retrieval of an existing key and the error returned for
+// a non-existent one. Requires a reachable etcd.
+func TestGet(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	c.Set("foo", "bar", 5)
+
+	result, err := c.Get("foo", false, false)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if result.Node.Key != "/foo" || result.Node.Value != "bar" {
+		t.Fatalf("Get failed with %s %s %v", result.Node.Key, result.Node.Value, result.Node.TTL)
+	}
+
+	result, err = c.Get("goo", false, false)
+	if err == nil {
+		t.Fatalf("should not be able to get non-exist key")
+	}
+}
+
+// TestGetAll verifies sorted directory listing and the `recursive`
+// option against fixed fixtures. Requires a reachable etcd.
+func TestGetAll(t *testing.T) {
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	c.CreateDir("fooDir", 5)
+	c.Set("fooDir/k0", "v0", 5)
+	c.Set("fooDir/k1", "v1", 5)
+
+	// Return kv-pairs in sorted order
+	result, err := c.Get("fooDir", true, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := Nodes{
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	cleanResult(result)
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+
+	// Test the `recursive` option
+	c.CreateDir("fooDir/childDir", 5)
+	c.Set("fooDir/childDir/k2", "v2", 5)
+
+	// Return kv-pairs in sorted order.
+	result, err = c.Get("fooDir", true, true)
+	// Check the error before touching result: the original called
+	// cleanResult first (a nil-deref risk on failure) and then cleaned
+	// the same result a second time.
+	if err != nil {
+		t.Fatal(err)
+	}
+	cleanResult(result)
+
+	expected = Nodes{
+		&Node{
+			Key: "/fooDir/childDir",
+			Dir: true,
+			Nodes: Nodes{
+				&Node{
+					Key:   "/fooDir/childDir/k2",
+					Value: "v2",
+					TTL:   5,
+				},
+			},
+			TTL: 5,
+		},
+		&Node{
+			Key:   "/fooDir/k0",
+			Value: "v0",
+			TTL:   5,
+		},
+		&Node{
+			Key:   "/fooDir/k1",
+			Value: "v1",
+			TTL:   5,
+		},
+	}
+
+	if !reflect.DeepEqual(result.Node.Nodes, expected) {
+		t.Fatalf("(actual) %v != (expected) %v", result.Node.Nodes, expected)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/member.go b/vendor/github.com/coreos/go-etcd/etcd/member.go
new file mode 100644
index 0000000000..5b13b28e1a
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/member.go
@@ -0,0 +1,30 @@
+package etcd
+
+import "encoding/json"
+
+// Member mirrors one entry of the JSON payload returned by etcd's
+// members API: a member's id, name, and its peer/client URL lists.
+type Member struct {
+	ID         string   `json:"id"`
+	Name       string   `json:"name"`
+	PeerURLs   []string `json:"peerURLs"`
+	ClientURLs []string `json:"clientURLs"`
+}
+
+// memberCollection decodes the {"members": [...]} wrapper returned by
+// the members API.
+type memberCollection []Member
+
+// UnmarshalJSON implements json.Unmarshaler. A missing or null "members"
+// field decodes to an empty, non-nil collection.
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+	var container struct {
+		Members []Member
+	}
+
+	if err := json.Unmarshal(data, &container); err != nil {
+		return err
+	}
+
+	if container.Members == nil {
+		*c = memberCollection{}
+		return nil
+	}
+
+	*c = container.Members
+	return nil
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/member_test.go b/vendor/github.com/coreos/go-etcd/etcd/member_test.go
new file mode 100644
index 0000000000..53ebdd4bfd
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/member_test.go
@@ -0,0 +1,71 @@
+package etcd
+
+import (
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+// TestMemberCollectionUnmarshal decodes an empty and a three-member
+// members payload and compares against literal expectations.
+func TestMemberCollectionUnmarshal(t *testing.T) {
+	tests := []struct {
+		body []byte
+		want memberCollection
+	}{
+		{
+			body: []byte(`{"members":[]}`),
+			want: memberCollection([]Member{}),
+		},
+		{
+			body: []byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`),
+			want: memberCollection(
+				[]Member{
+					{
+						ID:   "2745e2525fce8fe",
+						Name: "node3",
+						PeerURLs: []string{
+							"http://127.0.0.1:7003",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4003",
+						},
+					},
+					{
+						ID:   "42134f434382925",
+						Name: "node1",
+						PeerURLs: []string{
+							"http://127.0.0.1:2380",
+							"http://127.0.0.1:7001",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:2379",
+							"http://127.0.0.1:4001",
+						},
+					},
+					{
+						ID:   "94088180e21eb87b",
+						Name: "node2",
+						PeerURLs: []string{
+							"http://127.0.0.1:7002",
+						},
+						ClientURLs: []string{
+							"http://127.0.0.1:4002",
+						},
+					},
+				},
+			),
+		},
+	}
+
+	for i, tt := range tests {
+		var got memberCollection
+		err := json.Unmarshal(tt.body, &got)
+		if err != nil {
+			t.Errorf("#%d: unexpected error: %v", i, err)
+			continue
+		}
+
+		if !reflect.DeepEqual(tt.want, got) {
+			t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got)
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/options.go b/vendor/github.com/coreos/go-etcd/etcd/options.go
new file mode 100644
index 0000000000..d21c96f080
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/options.go
@@ -0,0 +1,72 @@
+package etcd
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+// Options holds per-request query options keyed by etcd parameter name;
+// values are validated against a validOptions table before use.
+type Options map[string]interface{}
+
+// An internally-used data structure that represents a mapping
+// between valid options and their kinds
+type validOptions map[string]reflect.Kind
+
+// Valid options for GET, PUT, POST, DELETE
+// Using CAPITALIZED_UNDERSCORE to emphasize that these
+// values are meant to be used as constants.
+var (
+	// Options accepted by GET (including wait/waitIndex).
+	VALID_GET_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"quorum":    reflect.Bool,
+		"sorted":    reflect.Bool,
+		"wait":      reflect.Bool,
+		"waitIndex": reflect.Uint64,
+	}
+
+	// Options accepted by PUT (preconditions and directory flag).
+	VALID_PUT_OPTIONS = validOptions{
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+		"prevExist": reflect.Bool,
+		"dir":       reflect.Bool,
+	}
+
+	// POST accepts no query options.
+	VALID_POST_OPTIONS = validOptions{}
+
+	// Options accepted by DELETE.
+	VALID_DELETE_OPTIONS = validOptions{
+		"recursive": reflect.Bool,
+		"dir":       reflect.Bool,
+		"prevValue": reflect.String,
+		"prevIndex": reflect.Uint64,
+	}
+)
+
+// toParameters converts the option map into an HTTP query string
+// ("?k=v&..."), rejecting options that are unknown to validOps or whose
+// value has the wrong kind. A nil map yields the empty string.
+func (ops Options) toParameters(validOps validOptions) (string, error) {
+	if ops == nil {
+		return "", nil
+	}
+
+	values := url.Values{}
+	for k, v := range ops {
+		// Unknown option names map to the zero Kind, reflect.Invalid.
+		kind := validOps[k]
+		if kind == reflect.Invalid {
+			return "", fmt.Errorf("Invalid option: %v", k)
+		}
+
+		// Enforce the declared kind for the option's value.
+		t := reflect.TypeOf(v)
+		if kind != t.Kind() {
+			return "", fmt.Errorf("Option %s should be of %v kind, not of %v kind.",
+				k, kind, t.Kind())
+		}
+
+		values.Set(k, fmt.Sprintf("%v", v))
+	}
+
+	return "?" + values.Encode(), nil
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/requests.go b/vendor/github.com/coreos/go-etcd/etcd/requests.go
new file mode 100644
index 0000000000..8f720f6f44
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/requests.go
@@ -0,0 +1,403 @@
+package etcd
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Errors introduced by handling requests
+var (
+	// ErrRequestCancelled is returned when the request's Cancel channel
+	// fires before the request completes.
+	ErrRequestCancelled = errors.New("sending request is cancelled")
+)
+
+// RawRequest describes one etcd HTTP request: the method, the path
+// relative to the API version prefix, optional form values, and an
+// optional cancellation channel.
+type RawRequest struct {
+	Method       string
+	RelativePath string
+	Values       url.Values
+	Cancel       <-chan bool
+}
+
+// NewRawRequest returns a new RawRequest with the given method, relative
+// path, form values and cancel channel (any of which may be nil/empty).
+func NewRawRequest(method, relativePath string, values url.Values, cancel <-chan bool) *RawRequest {
+	return &RawRequest{
+		Method:       method,
+		RelativePath: relativePath,
+		Values:       values,
+		Cancel:       cancel,
+	}
+}
+
+// getCancelable issues a GET for key with the given options; the request
+// is aborted if the cancel channel fires.
+func (c *Client) getCancelable(key string, options Options,
+	cancel <-chan bool) (*RawResponse, error) {
+	logger.Debugf("get %s [%s]", key, c.cluster.pick())
+
+	params, err := options.toParameters(VALID_GET_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.SendRequest(NewRawRequest("GET", keyToPath(key)+params, nil, cancel))
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// get issues a GET request without cancellation support.
+func (c *Client) get(key string, options Options) (*RawResponse, error) {
+	return c.getCancelable(key, options, nil)
+}
+
+// put issues a PUT carrying value/ttl as form data, with options encoded
+// into the query string.
+func (c *Client) put(key string, value string, ttl uint64,
+	options Options) (*RawResponse, error) {
+
+	logger.Debugf("put %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+
+	params, err := options.toParameters(VALID_PUT_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.SendRequest(NewRawRequest("PUT", keyToPath(key)+params, buildValues(value, ttl), nil))
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// post issues a POST carrying value/ttl as form data (no query options).
+func (c *Client) post(key string, value string, ttl uint64) (*RawResponse, error) {
+	logger.Debugf("post %s, %s, ttl: %d, [%s]", key, value, ttl, c.cluster.pick())
+
+	resp, err := c.SendRequest(NewRawRequest("POST", keyToPath(key), buildValues(value, ttl), nil))
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// delete issues a DELETE with options encoded into the query string.
+func (c *Client) delete(key string, options Options) (*RawResponse, error) {
+	logger.Debugf("delete %s [%s]", key, c.cluster.pick())
+
+	params, err := options.toParameters(VALID_DELETE_OPTIONS)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.SendRequest(NewRawRequest("DELETE", keyToPath(key)+params, nil, nil))
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// SendRequest sends a HTTP request and returns a Response as defined by etcd.
+// It retries with exponential backoff (25ms doubling up to 1s) across
+// cluster machines until checkRetry gives up, follows temporary
+// redirects, and honours the request's Cancel channel via a helper
+// goroutine that repeatedly cancels the in-flight request.
+func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) {
+	var req *http.Request
+	var resp *http.Response
+	var httpPath string
+	var err error
+	var respBody []byte
+
+	var numReqs = 1
+
+	checkRetry := c.CheckRetry
+	if checkRetry == nil {
+		checkRetry = DefaultCheckRetry
+	}
+
+	cancelled := make(chan bool, 1)
+	reqLock := new(sync.Mutex)
+
+	if rr.Cancel != nil {
+		cancelRoutine := make(chan bool)
+		defer close(cancelRoutine)
+
+		go func() {
+			select {
+			case <-rr.Cancel:
+				cancelled <- true
+				logger.Debug("send.request is cancelled")
+			case <-cancelRoutine:
+				return
+			}
+
+			// Repeat canceling request until this thread is stopped
+			// because we have no idea about whether it succeeds.
+			for {
+				reqLock.Lock()
+				c.httpClient.Transport.(*http.Transport).CancelRequest(req)
+				reqLock.Unlock()
+
+				select {
+				case <-time.After(100 * time.Millisecond):
+				case <-cancelRoutine:
+					return
+				}
+			}
+		}()
+	}
+
+	// If we connect to a follower and consistency is required, retry until
+	// we connect to a leader
+	sleep := 25 * time.Millisecond
+	maxSleep := time.Second
+
+	for attempt := 0; ; attempt++ {
+		if attempt > 0 {
+			// Back off before every retry, doubling up to maxSleep, and
+			// bail out immediately if the request was cancelled meanwhile.
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			case <-time.After(sleep):
+				sleep = sleep * 2
+				if sleep > maxSleep {
+					sleep = maxSleep
+				}
+			}
+		}
+
+		logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath)
+
+		// get httpPath if not set (it is pre-set only when following a
+		// redirect from the previous iteration)
+		if httpPath == "" {
+			httpPath = c.getHttpPath(rr.RelativePath)
+		}
+
+		// Return a cURL command if curlChan is set
+		if c.cURLch != nil {
+			command := fmt.Sprintf("curl -X %s %s", rr.Method, httpPath)
+			for key, value := range rr.Values {
+				command += fmt.Sprintf(" -d %s=%s", key, value[0])
+			}
+			if c.credentials != nil {
+				command += fmt.Sprintf(" -u %s", c.credentials.username)
+			}
+			c.sendCURL(command)
+		}
+
+		logger.Debug("send.request.to ", httpPath, " | method ", rr.Method)
+
+		// NOTE(review): `req, err :=` shadows the outer req/err for the
+		// rest of this iteration; the closure assigns the OUTER req under
+		// reqLock so the cancel goroutine always targets the live request.
+		req, err := func() (*http.Request, error) {
+			reqLock.Lock()
+			defer reqLock.Unlock()
+
+			if rr.Values == nil {
+				if req, err = http.NewRequest(rr.Method, httpPath, nil); err != nil {
+					return nil, err
+				}
+			} else {
+				body := strings.NewReader(rr.Values.Encode())
+				if req, err = http.NewRequest(rr.Method, httpPath, body); err != nil {
+					return nil, err
+				}
+
+				req.Header.Set("Content-Type",
+					"application/x-www-form-urlencoded; param=value")
+			}
+			return req, nil
+		}()
+
+		if err != nil {
+			return nil, err
+		}
+
+		if c.credentials != nil {
+			req.SetBasicAuth(c.credentials.username, c.credentials.password)
+		}
+
+		resp, err = c.httpClient.Do(req)
+		// clear previous httpPath
+		httpPath = ""
+		// NOTE(review): this defer is registered once per retry iteration
+		// and only runs at function return; together with the explicit
+		// Close calls below a body may be closed more than once.
+		defer func() {
+			if resp != nil {
+				resp.Body.Close()
+			}
+		}()
+
+		// If the request was cancelled, return ErrRequestCancelled directly
+		select {
+		case <-cancelled:
+			return nil, ErrRequestCancelled
+		default:
+		}
+
+		numReqs++
+
+		// network error, change a machine!
+		if err != nil {
+			logger.Debug("network error: ", err.Error())
+			lastResp := http.Response{}
+			if checkErr := checkRetry(c.cluster, numReqs, lastResp, err); checkErr != nil {
+				return nil, checkErr
+			}
+
+			c.cluster.failure()
+			continue
+		}
+
+		// if there is no error, it should receive response
+		// NOTE(review): httpPath was just cleared above, so this logs an
+		// empty path.
+		logger.Debug("recv.response.from ", httpPath)
+
+		if validHttpStatusCode[resp.StatusCode] {
+			// try to read byte code and break the loop
+			respBody, err = ioutil.ReadAll(resp.Body)
+			if err == nil {
+				logger.Debug("recv.success ", httpPath)
+				break
+			}
+			// ReadAll error may be caused due to cancel request
+			select {
+			case <-cancelled:
+				return nil, ErrRequestCancelled
+			default:
+			}
+
+			if err == io.ErrUnexpectedEOF {
+				// underlying connection was closed prematurely, probably by timeout
+				// TODO: empty body or unexpectedEOF can cause http.Transport to get hosed;
+				// this allows the client to detect that and take evasive action. Need
+				// to revisit once code.google.com/p/go/issues/detail?id=8648 gets fixed.
+				respBody = []byte{}
+				break
+			}
+		}
+
+		if resp.StatusCode == http.StatusTemporaryRedirect {
+			u, err := resp.Location()
+
+			if err != nil {
+				logger.Warning(err)
+			} else {
+				// set httpPath for following redirection
+				httpPath = u.String()
+			}
+			resp.Body.Close()
+			continue
+		}
+
+		if checkErr := checkRetry(c.cluster, numReqs, *resp,
+			errors.New("Unexpected HTTP status code")); checkErr != nil {
+			return nil, checkErr
+		}
+		resp.Body.Close()
+	}
+
+	r := &RawResponse{
+		StatusCode: resp.StatusCode,
+		Body:       respBody,
+		Header:     resp.Header,
+	}
+
+	return r, nil
+}
+
+// DefaultCheckRetry defines the retrying behaviour for bad HTTP requests
+// If we have retried 2 * machine number, stop retrying.
+// If status code is InternalServerError, sleep for 200ms.
+// A zero-valued lastResp (StatusCode 0) marks a network-level failure
+// and is always retried.
+func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response,
+	err error) error {
+
+	if numReqs > 2*len(cluster.Machines) {
+		errStr := fmt.Sprintf("failed to propose on members %v twice [last error: %v]", cluster.Machines, err)
+		return newError(ErrCodeEtcdNotReachable, errStr, 0)
+	}
+
+	if isEmptyResponse(lastResp) {
+		// always retry if it failed to get response from one machine
+		return nil
+	}
+	if !shouldRetry(lastResp) {
+		body := []byte("nil")
+		if lastResp.Body != nil {
+			if b, err := ioutil.ReadAll(lastResp.Body); err == nil {
+				body = b
+			}
+		}
+		errStr := fmt.Sprintf("unhandled http status [%s] with body [%s]", http.StatusText(lastResp.StatusCode), body)
+		return newError(ErrCodeUnhandledHTTPStatus, errStr, 0)
+	}
+	// sleep some time and expect leader election finish
+	time.Sleep(time.Millisecond * 200)
+	logger.Warning("bad response status code ", lastResp.StatusCode)
+	return nil
+}
+
+// isEmptyResponse reports whether r was never populated (status code
+// zero), i.e. the request failed before any HTTP response arrived.
+func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 }
+
+// shouldRetry returns whether the response deserves retry.
+func shouldRetry(r http.Response) bool {
+	// TODO: only retry when the cluster is in leader election
+	// We cannot do it exactly because etcd doesn't support it well.
+	return r.StatusCode == http.StatusInternalServerError
+}
+
+// getHttpPath builds a full URL from the currently-picked machine, the
+// API version prefix, and the given path segments.
+func (c *Client) getHttpPath(s ...string) string {
+	segments := append([]string{c.cluster.pick(), version}, s...)
+	return strings.Join(segments, "/")
+}
+
+// buildValues builds the form body for PUT/POST requests: "value" is set
+// when non-empty and "ttl" when positive (a zero ttl is omitted).
+func buildValues(value string, ttl uint64) url.Values {
+	values := url.Values{}
+
+	if value != "" {
+		values.Set("value", value)
+	}
+	if ttl > 0 {
+		values.Set("ttl", fmt.Sprintf("%v", ttl))
+	}
+
+	return values
+}
+
+// keyToPath converts a user key to an HTTP path under the keys/
+// namespace (no version prefix), URL-escaping everything except slashes.
+// Examples: "foo" -> "keys/foo", "/%z" -> "keys/%25z", "/" -> "keys/".
+func keyToPath(key string) string {
+	// Escape the joined path, then restore the path separators.
+	escaped := url.QueryEscape(path.Join("keys", key))
+	p := strings.Replace(escaped, "%2F", "/", -1)
+
+	// path.Join strips the trailing slash from keys like "/" or "//",
+	// leaving just the namespace — add the slash back.
+	if p == "keys" {
+		p += "/"
+	}
+
+	return p
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/requests_test.go b/vendor/github.com/coreos/go-etcd/etcd/requests_test.go
new file mode 100644
index 0000000000..7a2bd190a1
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/requests_test.go
@@ -0,0 +1,22 @@
+package etcd
+
+import "testing"
+
+func TestKeyToPath(t *testing.T) {
+ tests := []struct {
+ key string
+ wpath string
+ }{
+ {"", "keys/"},
+ {"foo", "keys/foo"},
+ {"foo/bar", "keys/foo/bar"},
+ {"%z", "keys/%25z"},
+ {"/", "keys/"},
+ }
+ for i, tt := range tests {
+ path := keyToPath(tt.key)
+ if path != tt.wpath {
+ t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath)
+ }
+ }
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/response.generated.go b/vendor/github.com/coreos/go-etcd/etcd/response.generated.go
new file mode 100644
index 0000000000..95d2cd99d4
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/response.generated.go
@@ -0,0 +1,1587 @@
+// ************************************************************
+// DO NOT EDIT.
+// THIS FILE IS AUTO-GENERATED BY codecgen.
+// ************************************************************
+
+package etcd
+
+import (
+ "errors"
+ "fmt"
+ codec1978 "github.com/ugorji/go/codec"
+ pkg1_http "net/http"
+ "reflect"
+ "runtime"
+ time "time"
+)
+
+const (
+ // ----- content types ----
+ codecSelferC_UTF81978 = 1
+ codecSelferC_RAW1978 = 0
+ // ----- value types used ----
+ codecSelferValueTypeArray1978 = 10
+ codecSelferValueTypeMap1978 = 9
+ // ----- containerStateValues ----
+ codecSelfer_containerMapKey1978 = 2
+ codecSelfer_containerMapValue1978 = 3
+ codecSelfer_containerMapEnd1978 = 4
+ codecSelfer_containerArrayElem1978 = 6
+ codecSelfer_containerArrayEnd1978 = 7
+)
+
+var (
+ codecSelferBitsize1978 = uint8(reflect.TypeOf(uint(0)).Bits())
+ codecSelferOnlyMapOrArrayEncodeToStructErr1978 = errors.New(`only encoded map or array can be decoded into a struct`)
+)
+
+type codecSelfer1978 struct{}
+
+func init() {
+ if codec1978.GenVersion != 5 {
+ _, file, _, _ := runtime.Caller(0)
+ err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
+ 5, codec1978.GenVersion, file)
+ panic(err)
+ }
+ if false { // reference the types, but skip this branch at build/run time
+ var v0 pkg1_http.Header
+ var v1 time.Time
+ _, _ = v0, v1
+ }
+}
+
+func (x responseType) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ yym1 := z.EncBinary()
+ _ = yym1
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ r.EncodeInt(int64(x))
+ }
+}
+
+func (x *responseType) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym2 := z.DecBinary()
+ _ = yym2
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ *((*int)(x)) = int(r.DecodeInt(codecSelferBitsize1978))
+ }
+}
+
+func (x *RawResponse) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym3 := z.EncBinary()
+ _ = yym3
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep4 := !z.EncBinary()
+ yy2arr4 := z.EncBasicHandle().StructToArray
+ var yyq4 [3]bool
+ _, _, _ = yysep4, yyq4, yy2arr4
+ const yyr4 bool = false
+ var yynn4 int
+ if yyr4 || yy2arr4 {
+ r.EncodeArrayStart(3)
+ } else {
+ yynn4 = 3
+ for _, b := range yyq4 {
+ if b {
+ yynn4++
+ }
+ }
+ r.EncodeMapStart(yynn4)
+ yynn4 = 0
+ }
+ if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym6 := z.EncBinary()
+ _ = yym6
+ if false {
+ } else {
+ r.EncodeInt(int64(x.StatusCode))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("StatusCode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym7 := z.EncBinary()
+ _ = yym7
+ if false {
+ } else {
+ r.EncodeInt(int64(x.StatusCode))
+ }
+ }
+ if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if x.Body == nil {
+ r.EncodeNil()
+ } else {
+ yym9 := z.EncBinary()
+ _ = yym9
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1978, []byte(x.Body))
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("Body"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.Body == nil {
+ r.EncodeNil()
+ } else {
+ yym10 := z.EncBinary()
+ _ = yym10
+ if false {
+ } else {
+ r.EncodeStringBytes(codecSelferC_RAW1978, []byte(x.Body))
+ }
+ }
+ }
+ if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if x.Header == nil {
+ r.EncodeNil()
+ } else {
+ yym12 := z.EncBinary()
+ _ = yym12
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Header) {
+ } else {
+ h.enchttp_Header((pkg1_http.Header)(x.Header), e)
+ }
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("Header"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.Header == nil {
+ r.EncodeNil()
+ } else {
+ yym13 := z.EncBinary()
+ _ = yym13
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x.Header) {
+ } else {
+ h.enchttp_Header((pkg1_http.Header)(x.Header), e)
+ }
+ }
+ }
+ if yyr4 || yy2arr4 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
+ }
+ }
+ }
+}
+
+func (x *RawResponse) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym14 := z.DecBinary()
+ _ = yym14
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct15 := r.ContainerType()
+ if yyct15 == codecSelferValueTypeMap1978 {
+ yyl15 := r.ReadMapStart()
+ if yyl15 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+ } else {
+ x.codecDecodeSelfFromMap(yyl15, d)
+ }
+ } else if yyct15 == codecSelferValueTypeArray1978 {
+ yyl15 := r.ReadArrayStart()
+ if yyl15 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ x.codecDecodeSelfFromArray(yyl15, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1978)
+ }
+ }
+}
+
+func (x *RawResponse) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys16Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys16Slc
+ var yyhl16 bool = l >= 0
+ for yyj16 := 0; ; yyj16++ {
+ if yyhl16 {
+ if yyj16 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
+ yys16Slc = r.DecodeBytes(yys16Slc, true, true)
+ yys16 := string(yys16Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
+ switch yys16 {
+ case "StatusCode":
+ if r.TryDecodeAsNil() {
+ x.StatusCode = 0
+ } else {
+ x.StatusCode = int(r.DecodeInt(codecSelferBitsize1978))
+ }
+ case "Body":
+ if r.TryDecodeAsNil() {
+ x.Body = nil
+ } else {
+ yyv18 := &x.Body
+ yym19 := z.DecBinary()
+ _ = yym19
+ if false {
+ } else {
+ *yyv18 = r.DecodeBytes(*(*[]byte)(yyv18), false, false)
+ }
+ }
+ case "Header":
+ if r.TryDecodeAsNil() {
+ x.Header = nil
+ } else {
+ yyv20 := &x.Header
+ yym21 := z.DecBinary()
+ _ = yym21
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv20) {
+ } else {
+ h.dechttp_Header((*pkg1_http.Header)(yyv20), d)
+ }
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys16)
+ } // end switch yys16
+ } // end for yyj16
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+}
+
+func (x *RawResponse) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj22 int
+ var yyb22 bool
+ var yyhl22 bool = l >= 0
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.StatusCode = 0
+ } else {
+ x.StatusCode = int(r.DecodeInt(codecSelferBitsize1978))
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Body = nil
+ } else {
+ yyv24 := &x.Body
+ yym25 := z.DecBinary()
+ _ = yym25
+ if false {
+ } else {
+ *yyv24 = r.DecodeBytes(*(*[]byte)(yyv24), false, false)
+ }
+ }
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Header = nil
+ } else {
+ yyv26 := &x.Header
+ yym27 := z.DecBinary()
+ _ = yym27
+ if false {
+ } else if z.HasExtensions() && z.DecExt(yyv26) {
+ } else {
+ h.dechttp_Header((*pkg1_http.Header)(yyv26), d)
+ }
+ }
+ for {
+ yyj22++
+ if yyhl22 {
+ yyb22 = yyj22 > l
+ } else {
+ yyb22 = r.CheckBreak()
+ }
+ if yyb22 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ z.DecStructFieldNotFound(yyj22-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+}
+
+func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym28 := z.EncBinary()
+ _ = yym28
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep29 := !z.EncBinary()
+ yy2arr29 := z.EncBasicHandle().StructToArray
+ var yyq29 [6]bool
+ _, _, _ = yysep29, yyq29, yy2arr29
+ const yyr29 bool = false
+ yyq29[2] = x.PrevNode != nil
+ var yynn29 int
+ if yyr29 || yy2arr29 {
+ r.EncodeArrayStart(6)
+ } else {
+ yynn29 = 5
+ for _, b := range yyq29 {
+ if b {
+ yynn29++
+ }
+ }
+ r.EncodeMapStart(yynn29)
+ yynn29 = 0
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym31 := z.EncBinary()
+ _ = yym31
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Action))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("action"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym32 := z.EncBinary()
+ _ = yym32
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Action))
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if x.Node == nil {
+ r.EncodeNil()
+ } else {
+ x.Node.CodecEncodeSelf(e)
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("node"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.Node == nil {
+ r.EncodeNil()
+ } else {
+ x.Node.CodecEncodeSelf(e)
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq29[2] {
+ if x.PrevNode == nil {
+ r.EncodeNil()
+ } else {
+ x.PrevNode.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq29[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("prevNode"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.PrevNode == nil {
+ r.EncodeNil()
+ } else {
+ x.PrevNode.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym36 := z.EncBinary()
+ _ = yym36
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.EtcdIndex))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("etcdIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym37 := z.EncBinary()
+ _ = yym37
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.EtcdIndex))
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym39 := z.EncBinary()
+ _ = yym39
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.RaftIndex))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("raftIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym40 := z.EncBinary()
+ _ = yym40
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.RaftIndex))
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym42 := z.EncBinary()
+ _ = yym42
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.RaftTerm))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("raftTerm"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym43 := z.EncBinary()
+ _ = yym43
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.RaftTerm))
+ }
+ }
+ if yyr29 || yy2arr29 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
+ }
+ }
+ }
+}
+
+func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym44 := z.DecBinary()
+ _ = yym44
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct45 := r.ContainerType()
+ if yyct45 == codecSelferValueTypeMap1978 {
+ yyl45 := r.ReadMapStart()
+ if yyl45 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+ } else {
+ x.codecDecodeSelfFromMap(yyl45, d)
+ }
+ } else if yyct45 == codecSelferValueTypeArray1978 {
+ yyl45 := r.ReadArrayStart()
+ if yyl45 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ x.codecDecodeSelfFromArray(yyl45, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1978)
+ }
+ }
+}
+
+func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys46Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys46Slc
+ var yyhl46 bool = l >= 0
+ for yyj46 := 0; ; yyj46++ {
+ if yyhl46 {
+ if yyj46 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
+ yys46Slc = r.DecodeBytes(yys46Slc, true, true)
+ yys46 := string(yys46Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
+ switch yys46 {
+ case "action":
+ if r.TryDecodeAsNil() {
+ x.Action = ""
+ } else {
+ x.Action = string(r.DecodeString())
+ }
+ case "node":
+ if r.TryDecodeAsNil() {
+ if x.Node != nil {
+ x.Node = nil
+ }
+ } else {
+ if x.Node == nil {
+ x.Node = new(Node)
+ }
+ x.Node.CodecDecodeSelf(d)
+ }
+ case "prevNode":
+ if r.TryDecodeAsNil() {
+ if x.PrevNode != nil {
+ x.PrevNode = nil
+ }
+ } else {
+ if x.PrevNode == nil {
+ x.PrevNode = new(Node)
+ }
+ x.PrevNode.CodecDecodeSelf(d)
+ }
+ case "etcdIndex":
+ if r.TryDecodeAsNil() {
+ x.EtcdIndex = 0
+ } else {
+ x.EtcdIndex = uint64(r.DecodeUint(64))
+ }
+ case "raftIndex":
+ if r.TryDecodeAsNil() {
+ x.RaftIndex = 0
+ } else {
+ x.RaftIndex = uint64(r.DecodeUint(64))
+ }
+ case "raftTerm":
+ if r.TryDecodeAsNil() {
+ x.RaftTerm = 0
+ } else {
+ x.RaftTerm = uint64(r.DecodeUint(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys46)
+ } // end switch yys46
+ } // end for yyj46
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+}
+
+func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj53 int
+ var yyb53 bool
+ var yyhl53 bool = l >= 0
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Action = ""
+ } else {
+ x.Action = string(r.DecodeString())
+ }
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ if x.Node != nil {
+ x.Node = nil
+ }
+ } else {
+ if x.Node == nil {
+ x.Node = new(Node)
+ }
+ x.Node.CodecDecodeSelf(d)
+ }
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ if x.PrevNode != nil {
+ x.PrevNode = nil
+ }
+ } else {
+ if x.PrevNode == nil {
+ x.PrevNode = new(Node)
+ }
+ x.PrevNode.CodecDecodeSelf(d)
+ }
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.EtcdIndex = 0
+ } else {
+ x.EtcdIndex = uint64(r.DecodeUint(64))
+ }
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.RaftIndex = 0
+ } else {
+ x.RaftIndex = uint64(r.DecodeUint(64))
+ }
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.RaftTerm = 0
+ } else {
+ x.RaftTerm = uint64(r.DecodeUint(64))
+ }
+ for {
+ yyj53++
+ if yyhl53 {
+ yyb53 = yyj53 > l
+ } else {
+ yyb53 = r.CheckBreak()
+ }
+ if yyb53 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ z.DecStructFieldNotFound(yyj53-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+}
+
+func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym60 := z.EncBinary()
+ _ = yym60
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ yysep61 := !z.EncBinary()
+ yy2arr61 := z.EncBasicHandle().StructToArray
+ var yyq61 [8]bool
+ _, _, _ = yysep61, yyq61, yy2arr61
+ const yyr61 bool = false
+ yyq61[1] = x.Value != ""
+ yyq61[2] = x.Dir != false
+ yyq61[3] = x.Expiration != nil
+ yyq61[4] = x.TTL != 0
+ yyq61[5] = len(x.Nodes) != 0
+ yyq61[6] = x.ModifiedIndex != 0
+ yyq61[7] = x.CreatedIndex != 0
+ var yynn61 int
+ if yyr61 || yy2arr61 {
+ r.EncodeArrayStart(8)
+ } else {
+ yynn61 = 1
+ for _, b := range yyq61 {
+ if b {
+ yynn61++
+ }
+ }
+ r.EncodeMapStart(yynn61)
+ yynn61 = 0
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ yym63 := z.EncBinary()
+ _ = yym63
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Key))
+ }
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("key"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym64 := z.EncBinary()
+ _ = yym64
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Key))
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[1] {
+ yym66 := z.EncBinary()
+ _ = yym66
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Value))
+ }
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, "")
+ }
+ } else {
+ if yyq61[1] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("value"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym67 := z.EncBinary()
+ _ = yym67
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(x.Value))
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[2] {
+ yym69 := z.EncBinary()
+ _ = yym69
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Dir))
+ }
+ } else {
+ r.EncodeBool(false)
+ }
+ } else {
+ if yyq61[2] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("dir"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym70 := z.EncBinary()
+ _ = yym70
+ if false {
+ } else {
+ r.EncodeBool(bool(x.Dir))
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[3] {
+ if x.Expiration == nil {
+ r.EncodeNil()
+ } else {
+ yym72 := z.EncBinary()
+ _ = yym72
+ if false {
+ } else if yym73 := z.TimeRtidIfBinc(); yym73 != 0 {
+ r.EncodeBuiltin(yym73, x.Expiration)
+ } else if z.HasExtensions() && z.EncExt(x.Expiration) {
+ } else if yym72 {
+ z.EncBinaryMarshal(x.Expiration)
+ } else if !yym72 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Expiration)
+ } else {
+ z.EncFallback(x.Expiration)
+ }
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq61[3] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("expiration"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.Expiration == nil {
+ r.EncodeNil()
+ } else {
+ yym74 := z.EncBinary()
+ _ = yym74
+ if false {
+ } else if yym75 := z.TimeRtidIfBinc(); yym75 != 0 {
+ r.EncodeBuiltin(yym75, x.Expiration)
+ } else if z.HasExtensions() && z.EncExt(x.Expiration) {
+ } else if yym74 {
+ z.EncBinaryMarshal(x.Expiration)
+ } else if !yym74 && z.IsJSONHandle() {
+ z.EncJSONMarshal(x.Expiration)
+ } else {
+ z.EncFallback(x.Expiration)
+ }
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[4] {
+ yym77 := z.EncBinary()
+ _ = yym77
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TTL))
+ }
+ } else {
+ r.EncodeInt(0)
+ }
+ } else {
+ if yyq61[4] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("ttl"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym78 := z.EncBinary()
+ _ = yym78
+ if false {
+ } else {
+ r.EncodeInt(int64(x.TTL))
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[5] {
+ if x.Nodes == nil {
+ r.EncodeNil()
+ } else {
+ x.Nodes.CodecEncodeSelf(e)
+ }
+ } else {
+ r.EncodeNil()
+ }
+ } else {
+ if yyq61[5] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("nodes"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if x.Nodes == nil {
+ r.EncodeNil()
+ } else {
+ x.Nodes.CodecEncodeSelf(e)
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[6] {
+ yym81 := z.EncBinary()
+ _ = yym81
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.ModifiedIndex))
+ }
+ } else {
+ r.EncodeUint(0)
+ }
+ } else {
+ if yyq61[6] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("modifiedIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym82 := z.EncBinary()
+ _ = yym82
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.ModifiedIndex))
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+ if yyq61[7] {
+ yym84 := z.EncBinary()
+ _ = yym84
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.CreatedIndex))
+ }
+ } else {
+ r.EncodeUint(0)
+ }
+ } else {
+ if yyq61[7] {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ r.EncodeString(codecSelferC_UTF81978, string("createdIndex"))
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ yym85 := z.EncBinary()
+ _ = yym85
+ if false {
+ } else {
+ r.EncodeUint(uint64(x.CreatedIndex))
+ }
+ }
+ }
+ if yyr61 || yy2arr61 {
+ z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
+ }
+ }
+ }
+}
+
+func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym86 := z.DecBinary()
+ _ = yym86
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ yyct87 := r.ContainerType()
+ if yyct87 == codecSelferValueTypeMap1978 {
+ yyl87 := r.ReadMapStart()
+ if yyl87 == 0 {
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+ } else {
+ x.codecDecodeSelfFromMap(yyl87, d)
+ }
+ } else if yyct87 == codecSelferValueTypeArray1978 {
+ yyl87 := r.ReadArrayStart()
+ if yyl87 == 0 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ } else {
+ x.codecDecodeSelfFromArray(yyl87, d)
+ }
+ } else {
+ panic(codecSelferOnlyMapOrArrayEncodeToStructErr1978)
+ }
+ }
+}
+
+func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yys88Slc = z.DecScratchBuffer() // default slice to decode into
+ _ = yys88Slc
+ var yyhl88 bool = l >= 0
+ for yyj88 := 0; ; yyj88++ {
+ if yyhl88 {
+ if yyj88 >= l {
+ break
+ }
+ } else {
+ if r.CheckBreak() {
+ break
+ }
+ }
+ z.DecSendContainerState(codecSelfer_containerMapKey1978)
+ yys88Slc = r.DecodeBytes(yys88Slc, true, true)
+ yys88 := string(yys88Slc)
+ z.DecSendContainerState(codecSelfer_containerMapValue1978)
+ switch yys88 {
+ case "key":
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ case "value":
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ case "dir":
+ if r.TryDecodeAsNil() {
+ x.Dir = false
+ } else {
+ x.Dir = bool(r.DecodeBool())
+ }
+ case "expiration":
+ if r.TryDecodeAsNil() {
+ if x.Expiration != nil {
+ x.Expiration = nil
+ }
+ } else {
+ if x.Expiration == nil {
+ x.Expiration = new(time.Time)
+ }
+ yym93 := z.DecBinary()
+ _ = yym93
+ if false {
+ } else if yym94 := z.TimeRtidIfBinc(); yym94 != 0 {
+ r.DecodeBuiltin(yym94, x.Expiration)
+ } else if z.HasExtensions() && z.DecExt(x.Expiration) {
+ } else if yym93 {
+ z.DecBinaryUnmarshal(x.Expiration)
+ } else if !yym93 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Expiration)
+ } else {
+ z.DecFallback(x.Expiration, false)
+ }
+ }
+ case "ttl":
+ if r.TryDecodeAsNil() {
+ x.TTL = 0
+ } else {
+ x.TTL = int64(r.DecodeInt(64))
+ }
+ case "nodes":
+ if r.TryDecodeAsNil() {
+ x.Nodes = nil
+ } else {
+ yyv96 := &x.Nodes
+ yyv96.CodecDecodeSelf(d)
+ }
+ case "modifiedIndex":
+ if r.TryDecodeAsNil() {
+ x.ModifiedIndex = 0
+ } else {
+ x.ModifiedIndex = uint64(r.DecodeUint(64))
+ }
+ case "createdIndex":
+ if r.TryDecodeAsNil() {
+ x.CreatedIndex = 0
+ } else {
+ x.CreatedIndex = uint64(r.DecodeUint(64))
+ }
+ default:
+ z.DecStructFieldNotFound(-1, yys88)
+ } // end switch yys88
+ } // end for yyj88
+ z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+}
+
+func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ var yyj99 int
+ var yyb99 bool
+ var yyhl99 bool = l >= 0
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Key = ""
+ } else {
+ x.Key = string(r.DecodeString())
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Value = ""
+ } else {
+ x.Value = string(r.DecodeString())
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Dir = false
+ } else {
+ x.Dir = bool(r.DecodeBool())
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ if x.Expiration != nil {
+ x.Expiration = nil
+ }
+ } else {
+ if x.Expiration == nil {
+ x.Expiration = new(time.Time)
+ }
+ yym104 := z.DecBinary()
+ _ = yym104
+ if false {
+ } else if yym105 := z.TimeRtidIfBinc(); yym105 != 0 {
+ r.DecodeBuiltin(yym105, x.Expiration)
+ } else if z.HasExtensions() && z.DecExt(x.Expiration) {
+ } else if yym104 {
+ z.DecBinaryUnmarshal(x.Expiration)
+ } else if !yym104 && z.IsJSONHandle() {
+ z.DecJSONUnmarshal(x.Expiration)
+ } else {
+ z.DecFallback(x.Expiration, false)
+ }
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.TTL = 0
+ } else {
+ x.TTL = int64(r.DecodeInt(64))
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.Nodes = nil
+ } else {
+ yyv107 := &x.Nodes
+ yyv107.CodecDecodeSelf(d)
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.ModifiedIndex = 0
+ } else {
+ x.ModifiedIndex = uint64(r.DecodeUint(64))
+ }
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+ return
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ if r.TryDecodeAsNil() {
+ x.CreatedIndex = 0
+ } else {
+ x.CreatedIndex = uint64(r.DecodeUint(64))
+ }
+ for {
+ yyj99++
+ if yyhl99 {
+ yyb99 = yyj99 > l
+ } else {
+ yyb99 = r.CheckBreak()
+ }
+ if yyb99 {
+ break
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayElem1978)
+ z.DecStructFieldNotFound(yyj99-1, "")
+ }
+ z.DecSendContainerState(codecSelfer_containerArrayEnd1978)
+}
+
+func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ if x == nil {
+ r.EncodeNil()
+ } else {
+ yym110 := z.EncBinary()
+ _ = yym110
+ if false {
+ } else if z.HasExtensions() && z.EncExt(x) {
+ } else {
+ h.encNodes((Nodes)(x), e)
+ }
+ }
+}
+
+func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperDecoder(d)
+ _, _, _ = h, z, r
+ yym111 := z.DecBinary()
+ _ = yym111
+ if false {
+ } else if z.HasExtensions() && z.DecExt(x) {
+ } else {
+ h.decNodes((*Nodes)(x), d)
+ }
+}
+
+func (x codecSelfer1978) enchttp_Header(v pkg1_http.Header, e *codec1978.Encoder) {
+ var h codecSelfer1978
+ z, r := codec1978.GenHelperEncoder(e)
+ _, _, _ = h, z, r
+ r.EncodeMapStart(len(v))
+ for yyk112, yyv112 := range v {
+ z.EncSendContainerState(codecSelfer_containerMapKey1978)
+ yym113 := z.EncBinary()
+ _ = yym113
+ if false {
+ } else {
+ r.EncodeString(codecSelferC_UTF81978, string(yyk112))
+ }
+ z.EncSendContainerState(codecSelfer_containerMapValue1978)
+ if yyv112 == nil {
+ r.EncodeNil()
+ } else {
+ yym114 := z.EncBinary()
+ _ = yym114
+ if false {
+ } else {
+ z.F.EncSliceStringV(yyv112, false, e)
+ }
+ }
+ }
+ z.EncSendContainerState(codecSelfer_containerMapEnd1978)
+}
+
+func (x codecSelfer1978) dechttp_Header(v *pkg1_http.Header, d *codec1978.Decoder) { // codecgen: decode http.Header (map[string][]string) in place
+	var h codecSelfer1978
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+
+	yyv115 := *v
+	yyl115 := r.ReadMapStart() // >=0: definite length; <0: stream/indefinite (terminated by a break)
+	yybh115 := z.DecBasicHandle()
+	if yyv115 == nil { // allocate the map lazily, sized via DecInferLen (40 = est. bytes per entry)
+		yyrl115, _ := z.DecInferLen(yyl115, yybh115.MaxInitLen, 40)
+		yyv115 = make(map[string][]string, yyrl115)
+		*v = yyv115
+	}
+	var yymk115 string
+	var yymv115 []string
+	var yymg115 bool
+	if yybh115.MapValueReset { // when set, decode into the existing value for the key instead of a fresh one
+		yymg115 = true
+	}
+	if yyl115 > 0 { // definite-length map
+		for yyj115 := 0; yyj115 < yyl115; yyj115++ {
+			z.DecSendContainerState(codecSelfer_containerMapKey1978)
+			if r.TryDecodeAsNil() {
+				yymk115 = ""
+			} else {
+				yymk115 = string(r.DecodeString())
+			}
+
+			if yymg115 {
+				yymv115 = yyv115[yymk115]
+			} else {
+				yymv115 = nil
+			}
+			z.DecSendContainerState(codecSelfer_containerMapValue1978)
+			if r.TryDecodeAsNil() {
+				yymv115 = nil
+			} else {
+				yyv117 := &yymv115
+				yym118 := z.DecBinary()
+				_ = yym118
+				if false {
+				} else {
+					z.F.DecSliceStringX(yyv117, false, d) // fast-path decoder for []string values
+				}
+			}
+
+			if yyv115 != nil {
+				yyv115[yymk115] = yymv115
+			}
+		}
+	} else if yyl115 < 0 { // stream-encoded map: read entries until break
+		for yyj115 := 0; !r.CheckBreak(); yyj115++ {
+			z.DecSendContainerState(codecSelfer_containerMapKey1978)
+			if r.TryDecodeAsNil() {
+				yymk115 = ""
+			} else {
+				yymk115 = string(r.DecodeString())
+			}
+
+			if yymg115 {
+				yymv115 = yyv115[yymk115]
+			} else {
+				yymv115 = nil
+			}
+			z.DecSendContainerState(codecSelfer_containerMapValue1978)
+			if r.TryDecodeAsNil() {
+				yymv115 = nil
+			} else {
+				yyv120 := &yymv115
+				yym121 := z.DecBinary()
+				_ = yym121
+				if false {
+				} else {
+					z.F.DecSliceStringX(yyv120, false, d)
+				}
+			}
+
+			if yyv115 != nil {
+				yyv115[yymk115] = yymv115
+			}
+		}
+	} // else len==0: TODO: Should we clear map entries?
+	z.DecSendContainerState(codecSelfer_containerMapEnd1978)
+}
+
+func (x codecSelfer1978) encNodes(v Nodes, e *codec1978.Encoder) { // codecgen: encode the Nodes slice as an array; nil elements encode as null
+	var h codecSelfer1978
+	z, r := codec1978.GenHelperEncoder(e)
+	_, _, _ = h, z, r
+	r.EncodeArrayStart(len(v))
+	for _, yyv122 := range v {
+		z.EncSendContainerState(codecSelfer_containerArrayElem1978)
+		if yyv122 == nil {
+			r.EncodeNil()
+		} else {
+			yyv122.CodecEncodeSelf(e) // each *Node encodes itself recursively (Node may contain nested Nodes)
+		}
+	}
+	z.EncSendContainerState(codecSelfer_containerArrayEnd1978)
+}
+
+func (x codecSelfer1978) decNodes(v *Nodes, d *codec1978.Decoder) { // codecgen: decode into *Nodes, reusing the existing backing slice where possible
+	var h codecSelfer1978
+	z, r := codec1978.GenHelperDecoder(d)
+	_, _, _ = h, z, r
+
+	yyv123 := *v
+	yyh123, yyl123 := z.DecSliceHelperStart() // yyl123 >=0: definite length; <0: stream (break-terminated)
+	var yyc123 bool                           // tracks whether *v must be written back at the end
+	if yyl123 == 0 {
+		if yyv123 == nil {
+			yyv123 = []*Node{} // empty input: normalize nil to an empty (non-nil) slice
+			yyc123 = true
+		} else if len(yyv123) != 0 {
+			yyv123 = yyv123[:0] // truncate but keep capacity
+			yyc123 = true
+		}
+	} else if yyl123 > 0 { // definite-length array
+		var yyrr123, yyrl123 int
+		var yyrt123 bool
+		if yyl123 > cap(yyv123) {
+
+			yyrg123 := len(yyv123) > 0
+			yyv2123 := yyv123
+			yyrl123, yyrt123 = z.DecInferLen(yyl123, z.DecBasicHandle().MaxInitLen, 8) // 8 = size of a pointer element
+			if yyrt123 { // inferred length was capped: may need to append beyond yyrl123 later
+				if yyrl123 <= cap(yyv123) {
+					yyv123 = yyv123[:yyrl123]
+				} else {
+					yyv123 = make([]*Node, yyrl123)
+				}
+			} else {
+				yyv123 = make([]*Node, yyrl123)
+			}
+			yyc123 = true
+			yyrr123 = len(yyv123)
+			if yyrg123 {
+				copy(yyv123, yyv2123) // preserve any pre-existing elements so they can be reused below
+			}
+		} else if yyl123 != len(yyv123) {
+			yyv123 = yyv123[:yyl123]
+			yyc123 = true
+		}
+		yyj123 := 0
+		for ; yyj123 < yyrr123; yyj123++ { // decode into the pre-sized portion
+			yyh123.ElemContainerState(yyj123)
+			if r.TryDecodeAsNil() {
+				if yyv123[yyj123] != nil {
+					*yyv123[yyj123] = Node{} // reset a reused element rather than dropping the pointer
+				}
+			} else {
+				if yyv123[yyj123] == nil {
+					yyv123[yyj123] = new(Node)
+				}
+				yyw124 := yyv123[yyj123]
+				yyw124.CodecDecodeSelf(d)
+			}
+
+		}
+		if yyrt123 { // remaining elements beyond the inferred capacity are appended one by one
+			for ; yyj123 < yyl123; yyj123++ {
+				yyv123 = append(yyv123, nil)
+				yyh123.ElemContainerState(yyj123)
+				if r.TryDecodeAsNil() {
+					if yyv123[yyj123] != nil {
+						*yyv123[yyj123] = Node{}
+					}
+				} else {
+					if yyv123[yyj123] == nil {
+						yyv123[yyj123] = new(Node)
+					}
+					yyw125 := yyv123[yyj123]
+					yyw125.CodecDecodeSelf(d)
+				}
+
+			}
+		}
+
+	} else { // stream-encoded array: read elements until break
+		yyj123 := 0
+		for ; !r.CheckBreak(); yyj123++ {
+
+			if yyj123 >= len(yyv123) {
+				yyv123 = append(yyv123, nil) // var yyz123 *Node
+				yyc123 = true
+			}
+			yyh123.ElemContainerState(yyj123)
+			if yyj123 < len(yyv123) {
+				if r.TryDecodeAsNil() {
+					if yyv123[yyj123] != nil {
+						*yyv123[yyj123] = Node{}
+					}
+				} else {
+					if yyv123[yyj123] == nil {
+						yyv123[yyj123] = new(Node)
+					}
+					yyw126 := yyv123[yyj123]
+					yyw126.CodecDecodeSelf(d)
+				}
+
+			} else {
+				z.DecSwallow() // append failed to grow (defensive): consume and discard the element
+			}
+
+		}
+		if yyj123 < len(yyv123) {
+			yyv123 = yyv123[:yyj123] // shrink to the number of elements actually read
+			yyc123 = true
+		} else if yyj123 == 0 && yyv123 == nil {
+			yyv123 = []*Node{}
+			yyc123 = true
+		}
+	}
+	yyh123.End()
+	if yyc123 {
+		*v = yyv123
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/response.go b/vendor/github.com/coreos/go-etcd/etcd/response.go
new file mode 100644
index 0000000000..69b38be46e
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/response.go
@@ -0,0 +1,93 @@
+package etcd
+
+//go:generate codecgen -d 1978 -o response.generated.go response.go
+
+import (
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/ugorji/go/codec"
+)
+
+const (
+	rawResponse = iota // leave the HTTP reply as raw bytes
+	normalResponse     // parse the HTTP reply into a Response
+)
+
+type responseType int // selects between rawResponse and normalResponse handling
+
+type RawResponse struct { // RawResponse holds the unparsed HTTP reply from etcd; see Unmarshal.
+	StatusCode int
+	Body       []byte
+	Header     http.Header
+}
+
+var (
+	validHttpStatusCode = map[int]bool{ // HTTP status codes the client treats as carrying a meaningful etcd payload
+		http.StatusCreated:            true,
+		http.StatusOK:                 true,
+		http.StatusBadRequest:         true,
+		http.StatusNotFound:           true,
+		http.StatusPreconditionFailed: true,
+		http.StatusForbidden:          true,
+		http.StatusUnauthorized:       true,
+	}
+)
+
+// Unmarshal parses RawResponse and stores the result in Response.
+// Non-2xx statuses are converted to an error via handleError; on success the
+// cluster index/term headers are copied onto the parsed Response.
+func (rr *RawResponse) Unmarshal() (*Response, error) {
+	if rr.StatusCode != http.StatusOK && rr.StatusCode != http.StatusCreated {
+		return nil, handleError(rr.Body) // body carries the etcd error document
+	}
+
+	resp := new(Response)
+
+	err := codec.NewDecoderBytes(rr.Body, new(codec.JsonHandle)).Decode(resp) // ugorji codec is used instead of encoding/json for speed (see benchmarks)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// attach index and term to response; parse errors are deliberately ignored
+	// (missing headers simply leave the fields at zero)
+	resp.EtcdIndex, _ = strconv.ParseUint(rr.Header.Get("X-Etcd-Index"), 10, 64)
+	resp.RaftIndex, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Index"), 10, 64)
+	resp.RaftTerm, _ = strconv.ParseUint(rr.Header.Get("X-Raft-Term"), 10, 64)
+
+	return resp, nil
+}
+
+type Response struct { // Response is a parsed etcd v2 API reply.
+	Action    string `json:"action"` // e.g. "get", "set", "create", "update" — value comes from the server
+	Node      *Node  `json:"node"`
+	PrevNode  *Node  `json:"prevNode,omitempty"` // previous state of the node, when the server reports one
+	EtcdIndex uint64 `json:"etcdIndex"`          // populated from the X-Etcd-Index response header
+	RaftIndex uint64 `json:"raftIndex"`          // populated from the X-Raft-Index response header
+	RaftTerm  uint64 `json:"raftTerm"`           // populated from the X-Raft-Term response header
+}
+
+// Node is a single key-value entry (or directory) in the etcd keyspace.
+type Node struct {
+	// FIX: the tag was `json:"key, omitempty"` — the space after the comma made
+	// encoding/json see the option as " omitempty" and ignore it, so the intended
+	// omit-when-empty behavior never applied to Key.
+	Key           string     `json:"key,omitempty"`
+	Value         string     `json:"value,omitempty"`
+	Dir           bool       `json:"dir,omitempty"`
+	Expiration    *time.Time `json:"expiration,omitempty"`
+	TTL           int64      `json:"ttl,omitempty"`
+	Nodes         Nodes      `json:"nodes,omitempty"`         // children, when this node is a directory
+	ModifiedIndex uint64     `json:"modifiedIndex,omitempty"`
+	CreatedIndex  uint64     `json:"createdIndex,omitempty"`
+}
+
+type Nodes []*Node
+
+// Nodes implements sort.Interface, ordering nodes lexicographically by Key.
+func (ns Nodes) Len() int {
+	return len(ns)
+}
+
+func (ns Nodes) Less(i, j int) bool {
+	return ns[i].Key < ns[j].Key
+}
+
+func (ns Nodes) Swap(i, j int) {
+	ns[i], ns[j] = ns[j], ns[i]
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/response_test.go b/vendor/github.com/coreos/go-etcd/etcd/response_test.go
new file mode 100644
index 0000000000..23e0c56eb3
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/response_test.go
@@ -0,0 +1,75 @@
+package etcd
+
+import (
+ "net/http"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/ugorji/go/codec"
+)
+
+func createTestNode(size int) *Node { // builds a leaf node with a fixed 30-char key and a value of `size` bytes
+	return &Node{
+		Key:           strings.Repeat("a", 30),
+		Value:         strings.Repeat("a", size),
+		TTL:           123456789,
+		ModifiedIndex: 123456,
+		CreatedIndex:  123456,
+	}
+}
+
+func createTestNodeWithChildren(children, size int) *Node { // parent node with `children` leaf nodes of `size`-byte values
+	node := createTestNode(size)
+	for i := 0; i < children; i++ {
+		node.Nodes = append(node.Nodes, createTestNode(size))
+	}
+	return node
+}
+
+func createTestResponse(children, size int) *Response { // full synthetic Response used by the unmarshalling benchmarks
+	return &Response{
+		Action:    "aaaaa",
+		Node:      createTestNodeWithChildren(children, size),
+		PrevNode:  nil,
+		EtcdIndex: 123456,
+		RaftIndex: 123456,
+		RaftTerm:  123456,
+	}
+}
+
+func benchmarkResponseUnmarshalling(b *testing.B, children, size int) { // encodes a synthetic response once, then times repeated Unmarshal calls
+	response := createTestResponse(children, size)
+
+	rr := RawResponse{http.StatusOK, make([]byte, 0), http.Header{}}
+	codec.NewEncoderBytes(&rr.Body, new(codec.JsonHandle)).Encode(response)
+
+	b.ResetTimer() // exclude the setup/encoding cost above from the measurement
+	newResponse := new(Response)
+	var err error
+	for i := 0; i < b.N; i++ {
+		if newResponse, err = rr.Unmarshal(); err != nil {
+			b.Errorf("Error: %v", err)
+		}
+
+	}
+	if !reflect.DeepEqual(response.Node, newResponse.Node) { // sanity-check the last decoded result round-trips
+		b.Errorf("Unexpected difference in a parsed response: \n%+v\n%+v", response, newResponse)
+	}
+}
+
+func BenchmarkSmallResponseUnmarshal(b *testing.B) {
+	benchmarkResponseUnmarshalling(b, 30, 20)
+}
+
+func BenchmarkManySmallResponseUnmarshal(b *testing.B) {
+	benchmarkResponseUnmarshalling(b, 3000, 20)
+}
+
+func BenchmarkMediumResponseUnmarshal(b *testing.B) {
+	benchmarkResponseUnmarshalling(b, 300, 200)
+}
+
+func BenchmarkLargeResponseUnmarshal(b *testing.B) {
+	benchmarkResponseUnmarshalling(b, 3000, 2000)
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go b/vendor/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
new file mode 100644
index 0000000000..87c86b8308
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/set_curl_chan_test.go
@@ -0,0 +1,42 @@
+package etcd
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestSetCurlChan(t *testing.T) { // verifies the debug cURL channel mirrors each issued request; requires a running etcd
+	c := NewClient(nil)
+	c.OpenCURL()
+
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	_, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected := fmt.Sprintf("curl -X PUT %s/v2/keys/foo -d value=bar -d ttl=5",
+		c.cluster.pick())
+	actual := c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+
+	c.SetConsistency(STRONG_CONSISTENCY) // quorum=true must show up in the recorded GET below
+	_, err = c.Get("foo", false, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	expected = fmt.Sprintf("curl -X GET %s/v2/keys/foo?quorum=true&recursive=false&sorted=false",
+		c.cluster.pick())
+	actual = c.RecvCURL()
+	if expected != actual {
+		t.Fatalf(`Command "%s" is not equal to expected value "%s"`,
+			actual, expected)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/set_update_create.go b/vendor/github.com/coreos/go-etcd/etcd/set_update_create.go
new file mode 100644
index 0000000000..e2840cf356
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/set_update_create.go
@@ -0,0 +1,137 @@
+package etcd
+
+// Set sets the given key to the given value.
+// It will create a new key value pair or replace the old one.
+// It will not replace an existing directory.
+func (c *Client) Set(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSet(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// SetDir sets the given key to a directory.
+// It will create a new directory or replace the old key value pair by a directory.
+// It will not replace an existing directory.
+func (c *Client) SetDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawSetDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateDir creates a directory. It succeeds only if
+// the given key does not yet exist.
+func (c *Client) CreateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// UpdateDir updates the given directory. It succeeds only if the
+// given key already exists.
+func (c *Client) UpdateDir(key string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdateDir(key, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Create creates a file with the given value under the given key. It succeeds
+// only if the given key does not yet exist.
+func (c *Client) Create(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// CreateInOrder creates a file with a key that's guaranteed to be higher than other
+// keys in the given directory. It is useful for creating queues.
+func (c *Client) CreateInOrder(dir string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawCreateInOrder(dir, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+// Update updates the given key to the given value. It succeeds only if the
+// given key already exists.
+func (c *Client) Update(key string, value string, ttl uint64) (*Response, error) {
+	raw, err := c.RawUpdate(key, value, ttl)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return raw.Unmarshal()
+}
+
+func (c *Client) RawUpdateDir(key string, ttl uint64) (*RawResponse, error) { // PUT with prevExist=true & dir=true: update an existing directory (raw reply)
+	ops := Options{
+		"prevExist": true,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawCreateDir(key string, ttl uint64) (*RawResponse, error) { // PUT with prevExist=false & dir=true: create a new directory (raw reply)
+	ops := Options{
+		"prevExist": false,
+		"dir":       true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawSet(key string, value string, ttl uint64) (*RawResponse, error) { // unconditional PUT of a key/value (raw reply)
+	return c.put(key, value, ttl, nil)
+}
+
+func (c *Client) RawSetDir(key string, ttl uint64) (*RawResponse, error) { // PUT with dir=true: set a directory unconditionally (raw reply)
+	ops := Options{
+		"dir": true,
+	}
+
+	return c.put(key, "", ttl, ops)
+}
+
+func (c *Client) RawUpdate(key string, value string, ttl uint64) (*RawResponse, error) { // PUT with prevExist=true: update only if the key exists (raw reply)
+	ops := Options{
+		"prevExist": true,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreate(key string, value string, ttl uint64) (*RawResponse, error) { // PUT with prevExist=false: create only if the key is absent (raw reply)
+	ops := Options{
+		"prevExist": false,
+	}
+
+	return c.put(key, value, ttl, ops)
+}
+
+func (c *Client) RawCreateInOrder(dir string, value string, ttl uint64) (*RawResponse, error) { // POST: server assigns a monotonically increasing key under dir (raw reply)
+	return c.post(dir, value, ttl)
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/set_update_create_test.go b/vendor/github.com/coreos/go-etcd/etcd/set_update_create_test.go
new file mode 100644
index 0000000000..ced0f06e7b
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/set_update_create_test.go
@@ -0,0 +1,241 @@
+package etcd
+
+import (
+ "testing"
+)
+
+func TestSet(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Node.Key != "/foo" || resp.Node.Value != "bar" || resp.Node.TTL != 5 {
+		t.Fatalf("Set 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil { // first Set: there is no previous node
+		t.Fatalf("Set 1 PrevNode failed: %#v", resp)
+	}
+
+	resp, err = c.Set("foo", "bar2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "bar2" && resp.Node.TTL == 5) {
+		t.Fatalf("Set 2 failed: %#v", resp)
+	}
+	if resp.PrevNode.Key != "/foo" || resp.PrevNode.Value != "bar" || resp.Node.TTL != 5 { // second Set: PrevNode reports the overwritten value
+		t.Fatalf("Set 2 PrevNode failed: %#v", resp)
+	}
+}
+
+func TestUpdate(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("nonexistent", true)
+	}()
+
+	resp, err := c.Set("foo", "bar", 5)
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.Update("foo", "wakawaka", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/foo" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.Node.TTL == 5) {
+		t.Fatalf("Update 1 prevValue failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.Update("nonexistent", "whatever", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreate(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("newKey", true)
+	}()
+
+	newKey := "/newKey"
+	newValue := "/newValue"
+
+	// This should succeed
+	resp, err := c.Create(newKey, newValue, 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == newKey &&
+		resp.Node.Value == newValue && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("Create 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.Create(newKey, newValue, 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+// TestCreateInOrder verifies that CreateInOrder assigns strictly increasing
+// keys within a directory. Integration test: requires a running etcd.
+func TestCreateInOrder(t *testing.T) {
+	c := NewClient(nil)
+	dir := "/queue"
+	defer func() {
+		c.DeleteDir(dir)
+	}()
+
+	var firstKey, secondKey string
+
+	resp, err := c.CreateInOrder(dir, "1", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "1" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 1 failed: %#v", resp)
+	}
+
+	firstKey = resp.Node.Key
+
+	resp, err = c.CreateInOrder(dir, "2", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Value == "2" && resp.Node.TTL == 5) {
+		t.Fatalf("Create 2 failed: %#v", resp)
+	}
+
+	secondKey = resp.Node.Key
+
+	// In-order keys must be strictly increasing, so the first key created must
+	// compare less than the second. FIX: the original failure message claimed
+	// the opposite ("greater than"), contradicting the condition it reports on.
+	if firstKey >= secondKey {
+		t.Fatalf("Expected first key to be less than second key, but %s is not less than %s",
+			firstKey, secondKey)
+	}
+}
+
+func TestSetDir(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("foo", true)
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/fooDir" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("SetDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because /fooDir already points to a directory
+	resp, err = c.CreateDir("/fooDir", 5)
+	if err == nil {
+		// NOTE(review): this message says "SetDir" but the call above is CreateDir
+		t.Fatalf("fooDir already points to a directory, so SetDir should have failed."+
+			"The response was: %#v", resp)
+	}
+
+	_, err = c.Set("foo", "bar", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed
+	// It should replace the key
+	resp, err = c.SetDir("foo", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/foo" && resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/foo" && resp.PrevNode.Value == "bar" && resp.PrevNode.TTL == 5) {
+		t.Fatalf("SetDir 2 failed: %#v", resp)
+	}
+}
+
+func TestUpdateDir(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// This should succeed.
+	resp, err = c.UpdateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "update" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("UpdateDir 1 failed: %#v", resp)
+	}
+	if !(resp.PrevNode.Key == "/fooDir" && resp.PrevNode.Dir == true && resp.PrevNode.TTL == 5) {
+		t.Fatalf("UpdateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail because the key does not exist.
+	resp, err = c.UpdateDir("nonexistentDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did not exist, so the update should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
+
+func TestCreateDir(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("fooDir", true)
+	}()
+
+	// This should succeed
+	resp, err := c.CreateDir("fooDir", 5)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !(resp.Action == "create" && resp.Node.Key == "/fooDir" &&
+		resp.Node.Value == "" && resp.Node.TTL == 5) {
+		t.Fatalf("CreateDir 1 failed: %#v", resp)
+	}
+	if resp.PrevNode != nil {
+		t.Fatalf("CreateDir 1 PrevNode failed: %#v", resp)
+	}
+
+	// This should fail, because the key is already there
+	resp, err = c.CreateDir("fooDir", 5)
+	if err == nil {
+		t.Fatalf("The key %v did exist, so the creation should have failed."+
+			"The response was: %#v", resp.Node.Key, resp)
+	}
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/shuffle.go b/vendor/github.com/coreos/go-etcd/etcd/shuffle.go
new file mode 100644
index 0000000000..c26ddac30c
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/shuffle.go
@@ -0,0 +1,19 @@
+package etcd
+
+import (
+ "math/rand"
+)
+
+// shuffleStringSlice returns a new slice containing the elements of cards in
+// a random order (via rand.Perm). Slices of length <= 1 are returned as-is,
+// without copying; the input slice is never mutated otherwise.
+func shuffleStringSlice(cards []string) []string {
+	size := len(cards)
+	//Do not need to copy if nothing changed
+	if size <= 1 {
+		return cards
+	}
+	shuffled := make([]string, size)
+	index := rand.Perm(size) // index[i] is the destination slot for cards[i]
+	for i := range cards {
+		shuffled[index[i]] = cards[i]
+	}
+	return shuffled
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/version.go b/vendor/github.com/coreos/go-etcd/etcd/version.go
new file mode 100644
index 0000000000..b1e9ed2713
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/version.go
@@ -0,0 +1,6 @@
+package etcd
+
+const (
+	version        = "v2"         // etcd HTTP API version — presumably the /v2/... URL path segment; TODO confirm against request builders
+	packageVersion = "v2.0.0+git" // version string of this client library
+)
diff --git a/vendor/github.com/coreos/go-etcd/etcd/watch.go b/vendor/github.com/coreos/go-etcd/etcd/watch.go
new file mode 100644
index 0000000000..aa8d3df301
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/watch.go
@@ -0,0 +1,103 @@
+package etcd
+
+import (
+ "errors"
+)
+
+// Errors introduced by the Watch command.
+var (
+	// ErrWatchStoppedByUser is returned when a watch ends because the caller
+	// signalled the stop channel rather than because of a failure.
+	ErrWatchStoppedByUser = errors.New("Watch stopped by the user via stop channel")
+)
+
+// If recursive is set to true the watch returns the first change under the given
+// prefix since the given index.
+//
+// If recursive is set to false the watch returns the first change to the given key
+// since the given index.
+//
+// To watch for the latest change, set waitIndex = 0.
+//
+// If a receiver channel is given, it will be a long-term watch. Watch will block on the
+// channel. After someone receives from the channel, it will go on to watch that
+// prefix. If a stop channel is given, the client can close the long-term watch using
+// the stop channel.
+func (c *Client) Watch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *Response, stop chan bool) (*Response, error) {
+	logger.Debugf("watch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil { // one-shot watch: return the first change directly
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return raw.Unmarshal()
+	}
+	defer close(receiver) // signal consumers that the long-term watch has ended
+
+	for { // long-term watch: loop until watchOnce errors (e.g. ErrWatchStoppedByUser)
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1 // resume after the change just seen
+		receiver <- resp
+	}
+}
+
+// RawWatch behaves like Watch but delivers unparsed RawResponses. Note that in
+// long-term mode each raw response is still unmarshalled internally to advance
+// waitIndex from the node's ModifiedIndex.
+func (c *Client) RawWatch(prefix string, waitIndex uint64, recursive bool,
+	receiver chan *RawResponse, stop chan bool) (*RawResponse, error) {
+
+	logger.Debugf("rawWatch %s [%s]", prefix, c.cluster.Leader)
+	if receiver == nil { // one-shot watch
+		return c.watchOnce(prefix, waitIndex, recursive, stop)
+	}
+
+	for { // long-term watch; NOTE(review): unlike Watch, receiver is never closed here
+		raw, err := c.watchOnce(prefix, waitIndex, recursive, stop)
+
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := raw.Unmarshal()
+
+		if err != nil {
+			return nil, err
+		}
+
+		waitIndex = resp.Node.ModifiedIndex + 1
+		receiver <- raw
+	}
+}
+
+// helper func
+// return when there is change under the given prefix
+func (c *Client) watchOnce(key string, waitIndex uint64, recursive bool, stop chan bool) (*RawResponse, error) {
+
+	options := Options{
+		"wait": true,
+	}
+	if waitIndex > 0 { // waitIndex == 0 means "latest change", so the param is omitted
+		options["waitIndex"] = waitIndex
+	}
+	if recursive {
+		options["recursive"] = true
+	}
+
+	resp, err := c.getCancelable(key, options, stop)
+
+	if err == ErrRequestCancelled { // translate a stop-channel cancellation into the watch-specific error
+		return nil, ErrWatchStoppedByUser
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/coreos/go-etcd/etcd/watch_test.go b/vendor/github.com/coreos/go-etcd/etcd/watch_test.go
new file mode 100644
index 0000000000..43e1dfeb81
--- /dev/null
+++ b/vendor/github.com/coreos/go-etcd/etcd/watch_test.go
@@ -0,0 +1,119 @@
+package etcd
+
+import (
+ "fmt"
+ "runtime"
+ "testing"
+ "time"
+)
+
+func TestWatch(t *testing.T) { // integration test: requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo", "bar", c) // triggers the change the watch below waits for
+
+	resp, err := c.Watch("watch_foo", 0, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, false, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo" && resp.Node.Value == "bar") {
+		t.Fatalf("Watch 2 failed: %#v", resp)
+	}
+
+	routineNum := runtime.NumGoroutine() // baseline for the goroutine-leak check below
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	go setLoop("watch_foo", "bar", c)
+
+	go receiver(ch, stop) // consumes 10 events then signals stop
+
+	_, err = c.Watch("watch_foo", 0, false, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func TestWatchAll(t *testing.T) { // integration test: recursive watch over a prefix; requires a running etcd
+	c := NewClient(nil)
+	defer func() {
+		c.Delete("watch_foo", true)
+	}()
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err := c.Watch("watch_foo", 0, true, nil, nil) // recursive=true watches the whole subtree
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 1 failed: %#v", resp)
+	}
+
+	go setHelper("watch_foo/foo", "bar", c)
+
+	resp, err = c.Watch("watch_foo", resp.Node.ModifiedIndex+1, true, nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !(resp.Node.Key == "/watch_foo/foo" && resp.Node.Value == "bar") {
+		t.Fatalf("WatchAll 2 failed: %#v", resp)
+	}
+
+	ch := make(chan *Response, 10)
+	stop := make(chan bool, 1)
+
+	routineNum := runtime.NumGoroutine() // baseline for the goroutine-leak check below
+
+	go setLoop("watch_foo/foo", "bar", c)
+
+	go receiver(ch, stop) // consumes 10 events then signals stop
+
+	_, err = c.Watch("watch_foo", 0, true, ch, stop)
+	if err != ErrWatchStoppedByUser {
+		t.Fatalf("Watch returned a non-user stop error")
+	}
+
+	if newRoutineNum := runtime.NumGoroutine(); newRoutineNum != routineNum {
+		t.Fatalf("Routine numbers differ after watch stop: %v, %v", routineNum, newRoutineNum)
+	}
+}
+
+func setHelper(key, value string, c *Client) { // sets key after a 1s delay so an already-started watch observes the change
+	time.Sleep(time.Second)
+	c.Set(key, value, 100)
+}
+
+func setLoop(key, value string, c *Client) { // produces 10 distinct values ("<value>_0".."<value>_9") at 100ms intervals
+	time.Sleep(time.Second)
+	for i := 0; i < 10; i++ {
+		newValue := fmt.Sprintf("%s_%v", value, i)
+		c.Set(key, newValue, 100)
+		time.Sleep(time.Second / 10)
+	}
+}
+
+func receiver(c chan *Response, stop chan bool) { // drains 10 watch events, then asks the watch to stop
+	for i := 0; i < 10; i++ {
+		<-c
+	}
+	stop <- true
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/.gitignore b/vendor/github.com/cpuguy83/go-md2man/.gitignore
new file mode 100644
index 0000000000..b651fbfb1d
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/.gitignore
@@ -0,0 +1 @@
+go-md2man
diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
new file mode 100644
index 0000000000..1cade6cef6
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Brian Goff
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cpuguy83/go-md2man/README.md b/vendor/github.com/cpuguy83/go-md2man/README.md
new file mode 100644
index 0000000000..8eb4b2eb07
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/README.md
@@ -0,0 +1,11 @@
+go-md2man
+=========
+
+** Work in Progress **
+This still needs a lot of help to be complete, or even usable!
+
+Uses blackfriday to process markdown into man pages.
+
+### Usage
+
+./md2man -in /path/to/markdownfile.md -out /manfile/output/path
diff --git a/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md b/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md
new file mode 100644
index 0000000000..1f7096a746
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/go-md2man.1.md
@@ -0,0 +1,21 @@
+go-md2man 1 "January 2015" go-md2man "User Manual"
+==================================================
+
+# NAME
+  go-md2man - Convert markdown files into manpages
+
+# SYNOPSIS
+ go-md2man -in=[/path/to/md/file] -out=[/path/to/output]
+
+# Description
+ go-md2man converts standard markdown formatted documents into manpages. It is
+ written purely in Go so as to reduce dependencies on 3rd party libs.
+
+# Example
+ Convert the markdown file "go-md2man.1.md" into a manpage.
+
+ go-md2man -in=README.md -out=go-md2man.1.out
+
+# HISTORY
+  January 2015, Originally compiled by Brian Goff (cpuguy83@gmail.com)
+
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man.go
new file mode 100644
index 0000000000..1dc70f47a7
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/cpuguy83/go-md2man/md2man"
+)
+
+var inFilePath = flag.String("in", "", "Path to file to be processed")
+var outFilePath = flag.String("out", "", "Path to output processed file")
+
+// main reads the markdown file named by -in, renders it to roff man-page
+// markup via md2man.Render, and writes the result to the file named by -out.
+// Any error is printed and the process exits with status 1.
+func main() {
+	flag.Parse()
+
+	inFile, err := os.Open(*inFilePath)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	defer inFile.Close()
+
+	doc, err := ioutil.ReadAll(inFile)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	out := md2man.Render(doc)
+
+	outFile, err := os.Create(*outFilePath)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	defer outFile.Close()
+	_, err = outFile.Write(out)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
new file mode 100644
index 0000000000..8f44fa1550
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go
@@ -0,0 +1,19 @@
+package md2man
+
+import (
+ "github.com/russross/blackfriday"
+)
+
+// Render converts a markdown document into roff (man page) markup using the
+// blackfriday parser with the roff renderer defined in this package.
+func Render(doc []byte) []byte {
+	renderer := RoffRenderer(0)
+	extensions := 0 // opt in to the markdown extensions man pages rely on
+	extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+	extensions |= blackfriday.EXTENSION_TABLES
+	extensions |= blackfriday.EXTENSION_FENCED_CODE
+	extensions |= blackfriday.EXTENSION_AUTOLINK
+	extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+	extensions |= blackfriday.EXTENSION_FOOTNOTES
+	extensions |= blackfriday.EXTENSION_TITLEBLOCK
+
+	return blackfriday.Markdown(doc, renderer, extensions)
+}
diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
new file mode 100644
index 0000000000..9f12daa160
--- /dev/null
+++ b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go
@@ -0,0 +1,281 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
+ "github.com/russross/blackfriday"
+)
+
+type roffRenderer struct{}
+
+var listCounter int
+
+func RoffRenderer(flags int) blackfriday.Renderer {
+ return &roffRenderer{}
+}
+
+func (r *roffRenderer) GetFlags() int {
+ return 0
+}
+
+func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
+ out.WriteString(".TH ")
+
+ splitText := bytes.Split(text, []byte("\n"))
+ for i, line := range splitText {
+ line = bytes.TrimPrefix(line, []byte("% "))
+ if i == 0 {
+ line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
+ line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
+ }
+ line = append([]byte("\""), line...)
+ line = append(line, []byte("\" ")...)
+ out.Write(line)
+ }
+ out.WriteString("\n")
+
+ // disable hyphenation
+ out.WriteString(".nh\n")
+ // disable justification (adjust text to left margin only)
+ out.WriteString(".ad l\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ out.WriteString("\n.PP\n.RS\n\n.nf\n")
+ escapeSpecialChars(out, text)
+ out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n.PP\n.RS\n")
+ out.Write(text)
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
+ out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch {
+ case marker == 0:
+ // This is the doc header
+ out.WriteString(".TH ")
+ case level == 1:
+ out.WriteString("\n\n.SH ")
+ case level == 2:
+ out.WriteString("\n.SH ")
+ default:
+ out.WriteString("\n.SS ")
+ }
+
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+ out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ listCounter = 1
+ }
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", listCounter))
+ listCounter += 1
+ } else {
+ out.WriteString(".IP \\(bu 2\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n.PP\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if marker != 0 {
+ out.WriteString("\n")
+ }
+}
+
+// TODO: This might not work
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString(".TS\nallbox;\n")
+
+ out.Write(header)
+ out.Write(body)
+ out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString(" ")
+ }
+ out.Write(text)
+ out.WriteString(" ")
+}
+
+// TODO: This is probably broken
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ out.Write(text)
+ out.WriteString("\t")
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\n\\[la]")
+ out.Write(link)
+ out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB\\fC")
+ escapeSpecialChars(out, text)
+ out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fI")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+ out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ r.AutoLink(out, link, 0)
+}
+
+func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {
+ out.Write(tag)
+}
+
+func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\s+2")
+ out.Write(text)
+ out.WriteString("\\s-2")
+}
+
+func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
+}
+
+func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
+ out.WriteString(html.UnescapeString(string(entity)))
+}
+
+func processFooterText(text []byte) []byte {
+ text = bytes.TrimPrefix(text, []byte("% "))
+ newText := []byte{}
+ textArr := strings.Split(string(text), ") ")
+
+ for i, w := range textArr {
+ if i == 0 {
+ w = strings.Replace(w, "(", "\" \"", 1)
+ w = fmt.Sprintf("\"%s\"", w)
+ } else {
+ w = fmt.Sprintf(" \"%s\"", w)
+ }
+ newText = append(newText, []byte(w)...)
+ }
+ newText = append(newText, []byte(" \"\"")...)
+
+ return newText
+}
+
+func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
+}
+
+func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("-_&\\~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // escape initial apostrophe or period
+ if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
+ out.WriteString("\\&")
+ }
+
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 0000000000..15586a2b54
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 0000000000..83dc540ef9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,3 @@
+sudo: false
+language: go
+go: 1.5
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000000..c33dcc7c92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000000..ad404a8113
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,17 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000000..3d5b8bd925
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,104 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and others similar.
+
+## Why?
+
+A common question when viewing HCL is to ask the question: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also forces
+people to learn some set of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+ * Values are assigned with the syntax `key = value` (whitespace doesn't
+ matter). The value can be any primitive: a string, number, boolean,
+ object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<-
+ echo %Path%
+
+ go version
+
+ go env
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000000..02888d2ab6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,654 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// This is the tag to use with structures to have settings for HCL
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+ // If we have the file, we really decode the root node
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+ // When we see an interface, we make our own thing
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(int(v)))
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(int(v)))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+ // When we see an ast.Node, we retain the value to enable deferred decoding.
+ // Very useful in situations where we want to preserve ast.Node information
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+ // For testing types, ObjectType should just be treated as a list. We
+ // set this to a temporary var because we want to pass in the real node.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+ // Set the result to what its supposed to be, then reset
+ // result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ // Create the slice if it isn't nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value)
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields[&fieldType] = structVal.Field(i)
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for fieldType, field := range fields {
+ if !field.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !field.CanSet() {
+ continue
+ }
+
+ fieldName := fieldType.Name
+
+ tagValue := fieldType.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, field)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ field.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, field)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, field); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, field); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, fieldType.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
diff --git a/vendor/github.com/hashicorp/hcl/decoder_test.go b/vendor/github.com/hashicorp/hcl/decoder_test.go
new file mode 100644
index 0000000000..5aea64a018
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder_test.go
@@ -0,0 +1,730 @@
+package hcl
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/testhelper"
+)
+
+func TestDecode_interface(t *testing.T) {
+ cases := []struct {
+ File string
+ Err bool
+ Out interface{}
+ }{
+ {
+ "basic.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ },
+ },
+ {
+ "basic_squish.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}",
+ "foo-bar": "baz",
+ },
+ },
+ {
+ "empty.hcl",
+ false,
+ map[string]interface{}{
+ "resource": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{},
+ },
+ },
+ },
+ },
+ },
+ {
+ "tfvars.hcl",
+ false,
+ map[string]interface{}{
+ "regularvar": "Should work",
+ "map.key1": "Value",
+ "map.key2": "Other value",
+ },
+ },
+ {
+ "escape.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "bar\"baz\\n",
+ },
+ },
+ {
+ "interpolate_escape.hcl",
+ false,
+ map[string]interface{}{
+ "foo": "${file(\"bing/bong.txt\")}",
+ },
+ },
+ {
+ "float.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1.02,
+ },
+ },
+ {
+ "multiline_bad.hcl",
+ true,
+ nil,
+ },
+ {
+ "multiline_no_marker.hcl",
+ true,
+ nil,
+ },
+ {
+ "multiline.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n")},
+ },
+ {
+ "multiline_indented.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos(" bar\n baz\n")},
+ },
+ {
+ "multiline_no_hanging_indent.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos(" baz\n bar\n foo\n")},
+ },
+ {
+ "multiline_no_eof.hcl",
+ false,
+ map[string]interface{}{"foo": testhelper.Unix2dos("bar\nbaz\n"), "key": "value"},
+ },
+ {
+ "multiline.json",
+ false,
+ map[string]interface{}{"foo": "bar\nbaz"},
+ },
+ {
+ "scientific.json",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "scientific.hcl",
+ false,
+ map[string]interface{}{
+ "a": 1e-10,
+ "b": 1e+10,
+ "c": 1e10,
+ "d": 1.2e-10,
+ "e": 1.2e+10,
+ "f": 1.2e10,
+ },
+ },
+ {
+ "terraform_heroku.hcl",
+ false,
+ map[string]interface{}{
+ "name": "terraform-test-app",
+ "config_vars": []map[string]interface{}{
+ map[string]interface{}{
+ "FOO": "bar",
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ },
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "structure_multi.json",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "baz": []map[string]interface{}{
+ map[string]interface{}{"key": 7},
+ },
+ },
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{"key": 12},
+ },
+ },
+ },
+ },
+ },
+ {
+ "structure_list.hcl",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list.json",
+ false,
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "key": 7,
+ },
+ map[string]interface{}{
+ "key": 12,
+ },
+ },
+ },
+ },
+ {
+ "structure_list_deep.json",
+ false,
+ map[string]interface{}{
+ "bar": []map[string]interface{}{
+ map[string]interface{}{
+ "foo": []map[string]interface{}{
+ map[string]interface{}{
+ "name": "terraform_example",
+ "ingress": []map[string]interface{}{
+ map[string]interface{}{
+ "from_port": 22,
+ },
+ map[string]interface{}{
+ "from_port": 80,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+
+ {
+ "nested_block_comment.hcl",
+ false,
+ map[string]interface{}{
+ "bar": "value",
+ },
+ },
+
+ {
+ "unterminated_block_comment.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "unterminated_brace.hcl",
+ true,
+ nil,
+ },
+
+ {
+ "nested_provider_bad.hcl",
+ true,
+ // This is not ideal but without significant rework of the decoder
+ // we get a partial result back as well as an error.
+ map[string]interface{}{},
+ },
+
+ {
+ "object_list.json",
+ false,
+ map[string]interface{}{
+ "resource": []map[string]interface{}{
+ map[string]interface{}{
+ "aws_instance": []map[string]interface{}{
+ map[string]interface{}{
+ "db": []map[string]interface{}{
+ map[string]interface{}{
+ "vpc": "foo",
+ "provisioner": []map[string]interface{}{
+ map[string]interface{}{
+ "file": []map[string]interface{}{
+ map[string]interface{}{
+ "source": "foo",
+ "destination": "bar",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ t.Logf("Testing: %s", tc.File)
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.File))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var out interface{}
+ err = Decode(&out, string(d))
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
+
+ if !reflect.DeepEqual(out, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
+
+ var v interface{}
+ err = Unmarshal(d, &v)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.File, err)
+ }
+
+ if !reflect.DeepEqual(v, tc.Out) {
+ t.Fatalf("Input: %s. Actual, Expected.\n\n%#v\n\n%#v", tc.File, out, tc.Out)
+ }
+ }
+}
+
+func TestDecode_equal(t *testing.T) {
+ cases := []struct {
+ One, Two string
+ }{
+ {
+ "basic.hcl",
+ "basic.json",
+ },
+ {
+ "float.hcl",
+ "float.json",
+ },
+ /*
+ {
+ "structure.hcl",
+ "structure.json",
+ },
+ */
+ {
+ "structure.hcl",
+ "structure_flat.json",
+ },
+ {
+ "terraform_heroku.hcl",
+ "terraform_heroku.json",
+ },
+ }
+
+ for _, tc := range cases {
+ p1 := filepath.Join(fixtureDir, tc.One)
+ p2 := filepath.Join(fixtureDir, tc.Two)
+
+ d1, err := ioutil.ReadFile(p1)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ d2, err := ioutil.ReadFile(p2)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ var i1, i2 interface{}
+ err = Decode(&i1, string(d1))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = Decode(&i2, string(d2))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if !reflect.DeepEqual(i1, i2) {
+ t.Fatalf(
+ "%s != %s\n\n%#v\n\n%#v",
+ tc.One, tc.Two,
+ i1, i2)
+ }
+ }
+}
+
+func TestDecode_flatMap(t *testing.T) {
+ var val map[string]map[string]string
+
+ err := Decode(&val, testReadFile(t, "structure_flatmap.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := map[string]map[string]string{
+ "foo": map[string]string{
+ "foo": "bar",
+ "key": "7",
+ },
+ }
+
+ if !reflect.DeepEqual(val, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", val, expected)
+ }
+}
+
+func TestDecode_structure(t *testing.T) {
+ type V struct {
+ Key int
+ Foo string
+ }
+
+ var actual V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structurePtr(t *testing.T) {
+ type V struct {
+ Key int
+ Foo string
+ }
+
+ var actual *V
+
+ err := Decode(&actual, testReadFile(t, "flat.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ expected := &V{
+ Key: 7,
+ Foo: "bar",
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Actual: %#v\n\nExpected: %#v", actual, expected)
+ }
+}
+
+func TestDecode_structureArray(t *testing.T) {
+ // This test is extracted from a failure in Consul (consul.io),
+ // hence the interesting structure naming.
+
+ type KeyPolicyType string
+
+ type KeyPolicy struct {
+ Prefix string `hcl:",key"`
+ Policy KeyPolicyType
+ }
+
+ type Policy struct {
+ Keys []KeyPolicy `hcl:"key,expand"`
+ }
+
+ expected := Policy{
+ Keys: []KeyPolicy{
+ KeyPolicy{
+ Prefix: "",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/",
+ Policy: "write",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/",
+ Policy: "read",
+ },
+ KeyPolicy{
+ Prefix: "foo/bar/baz",
+ Policy: "deny",
+ },
+ },
+ }
+
+ files := []string{
+ "decode_policy.hcl",
+ "decode_policy.json",
+ }
+
+ for _, f := range files {
+ var actual Policy
+
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_sliceExpand(t *testing.T) {
+ type testInner struct {
+ Name string `hcl:",key"`
+ Key string
+ }
+
+ type testStruct struct {
+ Services []testInner `hcl:"service,expand"`
+ }
+
+ expected := testStruct{
+ Services: []testInner{
+ testInner{
+ Name: "my-service-0",
+ Key: "value",
+ },
+ testInner{
+ Name: "my-service-1",
+ Key: "value",
+ },
+ },
+ }
+
+ files := []string{
+ "slice_expand.hcl",
+ }
+
+ for _, f := range files {
+ t.Logf("Testing: %s", f)
+
+ var actual testStruct
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_structureMap(t *testing.T) {
+ // This test is extracted from a failure in Terraform (terraform.io),
+ // hence the interesting structure naming.
+
+ type hclVariable struct {
+ Default interface{}
+ Description string
+ Fields []string `hcl:",decodedFields"`
+ }
+
+ type rawConfig struct {
+ Variable map[string]hclVariable
+ }
+
+ expected := rawConfig{
+ Variable: map[string]hclVariable{
+ "foo": hclVariable{
+ Default: "bar",
+ Description: "bar",
+ Fields: []string{"Default", "Description"},
+ },
+
+ "amis": hclVariable{
+ Default: []map[string]interface{}{
+ map[string]interface{}{
+ "east": "foo",
+ },
+ },
+ Fields: []string{"Default"},
+ },
+ },
+ }
+
+ files := []string{
+ "decode_tf_variable.hcl",
+ "decode_tf_variable.json",
+ }
+
+ for _, f := range files {
+ t.Logf("Testing: %s", f)
+
+ var actual rawConfig
+ err := Decode(&actual, testReadFile(t, f))
+ if err != nil {
+ t.Fatalf("Input: %s\n\nerr: %s", f, err)
+ }
+
+ if !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("Input: %s\n\nActual: %#v\n\nExpected: %#v", f, actual, expected)
+ }
+ }
+}
+
+func TestDecode_interfaceNonPointer(t *testing.T) {
+ var value interface{}
+ err := Decode(value, testReadFile(t, "basic_int_string.hcl"))
+ if err == nil {
+ t.Fatal("should error")
+ }
+}
+
+func TestDecode_intString(t *testing.T) {
+ var value struct {
+ Count int
+ }
+
+ err := Decode(&value, testReadFile(t, "basic_int_string.hcl"))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if value.Count != 3 {
+ t.Fatalf("bad: %#v", value.Count)
+ }
+}
+
+func TestDecode_Node(t *testing.T) {
+ // given
+ var value struct {
+ Content ast.Node
+ Nested struct {
+ Content ast.Node
+ }
+ }
+
+ content := `
+content {
+ hello = "world"
+}
+`
+
+ // when
+ err := Decode(&value, content)
+
+ // then
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ // verify ast.Node can be decoded later
+ var v map[string]interface{}
+ err = DecodeObject(&v, value.Content)
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ if v["hello"] != "world" {
+ t.Errorf("expected mapping to be returned")
+ }
+}
+
+func TestDecode_NestedNode(t *testing.T) {
+ // given
+ var value struct {
+ Nested struct {
+ Content ast.Node
+ }
+ }
+
+ content := `
+nested "content" {
+ hello = "world"
+}
+`
+
+ // when
+ err := Decode(&value, content)
+
+ // then
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ // verify ast.Node can be decoded later
+ var v map[string]interface{}
+ err = DecodeObject(&v, value.Nested.Content)
+ if err != nil {
+ t.Errorf("unable to decode content, %v", err)
+ return
+ }
+
+ if v["hello"] != "world" {
+ t.Errorf("expected mapping to be returned")
+ }
+}
+
+// https://github.com/hashicorp/hcl/issues/60
+func TestDecode_topLevelKeys(t *testing.T) {
+ type Template struct {
+ Source string
+ }
+
+ templates := struct {
+ Templates []*Template `hcl:"template"`
+ }{}
+
+ err := Decode(&templates, `
+ template {
+ source = "blah"
+ }
+
+ template {
+ source = "blahblah"
+ }`)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if templates.Templates[0].Source != "blah" {
+ t.Errorf("bad source: %s", templates.Templates[0].Source)
+ }
+
+ if templates.Templates[1].Source != "blahblah" {
+ t.Errorf("bad source: %s", templates.Templates[1].Source)
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 0000000000..575a20b50b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 0000000000..f8bb71a047
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,211 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// always returns the uninitialized position
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents a HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested)
+type ObjectItem struct {
+ // keys is only one length long if it's of type assignment. If it's a
+ // nested object it can be larger than one. In that case "assign" is
+ // invalid as there is no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or a
+ // string. If key length is larger than one, val can be only of type
+ // Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ return o.Keys[0].Pos()
+}
+
+// ObjectKey represents an object key, either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // associated line comment, only when used in a list
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents a HCL Object Type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /*- style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
new file mode 100644
index 0000000000..942256cadc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go
@@ -0,0 +1,200 @@
+package ast
+
+import (
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestObjectListFilter(t *testing.T) {
+ var cases = []struct {
+ Filter []string
+ Input []*ObjectItem
+ Output []*ObjectItem
+ }{
+ {
+ []string{"foo"},
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{
+ Token: token.Token{Type: token.STRING, Text: `"foo"`},
+ },
+ },
+ },
+ },
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{},
+ },
+ },
+ },
+
+ {
+ []string{"foo"},
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ },
+ []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range cases {
+ input := &ObjectList{Items: tc.Input}
+ expected := &ObjectList{Items: tc.Output}
+ if actual := input.Filter(tc.Filter...); !reflect.DeepEqual(actual, expected) {
+ t.Fatalf("in order: input, expected, actual\n\n%#v\n\n%#v\n\n%#v", input, expected, actual)
+ }
+ }
+}
+
+func TestWalk(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ Val: &LiteralType{Token: token.Token{Type: token.STRING, Text: `"example"`}},
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ order := []string{
+ "*ast.ObjectList",
+ "*ast.ObjectItem",
+ "*ast.ObjectKey",
+ "*ast.ObjectKey",
+ "*ast.LiteralType",
+ "*ast.ObjectItem",
+ "*ast.ObjectKey",
+ }
+ count := 0
+
+ Walk(node, func(n Node) (Node, bool) {
+ if n == nil {
+ return n, false
+ }
+
+ typeName := reflect.TypeOf(n).String()
+ if order[count] != typeName {
+ t.Errorf("expected '%s' got: '%s'", order[count], typeName)
+ }
+ count++
+ return n, true
+ })
+}
+
+func TestWalkEquality(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ rewritten := Walk(node, func(n Node) (Node, bool) { return n, true })
+
+ newNode, ok := rewritten.(*ObjectList)
+ if !ok {
+ t.Fatalf("expected Objectlist, got %T", rewritten)
+ }
+
+ if !reflect.DeepEqual(node, newNode) {
+ t.Fatal("rewritten node is not equal to the given node")
+ }
+
+ if len(newNode.Items) != 2 {
+ t.Error("expected newNode length 2, got: %d", len(newNode.Items))
+ }
+
+ expected := []string{
+ `"foo"`,
+ `"bar"`,
+ }
+
+ for i, item := range newNode.Items {
+ if len(item.Keys) != 1 {
+ t.Error("expected keys newNode length 1, got: %d", len(item.Keys))
+ }
+
+ if item.Keys[0].Token.Text != expected[i] {
+ t.Errorf("expected key %s, got %s", expected[i], item.Keys[0].Token.Text)
+ }
+
+ if item.Val != nil {
+ t.Errorf("expected item value should be nil")
+ }
+ }
+}
+
+func TestWalkRewrite(t *testing.T) {
+ items := []*ObjectItem{
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"foo"`}},
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"bar"`}},
+ },
+ },
+ &ObjectItem{
+ Keys: []*ObjectKey{
+ &ObjectKey{Token: token.Token{Type: token.STRING, Text: `"baz"`}},
+ },
+ },
+ }
+
+ node := &ObjectList{Items: items}
+
+ suffix := "_example"
+ node = Walk(node, func(n Node) (Node, bool) {
+ switch i := n.(type) {
+ case *ObjectKey:
+ i.Token.Text = i.Token.Text + suffix
+ n = i
+ }
+ return n, true
+ }).(*ObjectList)
+
+ Walk(node, func(n Node) (Node, bool) {
+ switch i := n.(type) {
+ case *ObjectKey:
+ if !strings.HasSuffix(i.Token.Text, suffix) {
+ t.Errorf("Token '%s' should have suffix: %s", i.Token.Text, suffix)
+ }
+ }
+ return n, true
+ })
+
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 0000000000..ba07ad42b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the
+// returned bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
new file mode 100644
index 0000000000..afc1e4eb12
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go
@@ -0,0 +1,164 @@
+// Derivative work from:
+// - https://golang.org/src/cmd/gofmt/gofmt.go
+// - https://github.com/fatih/hclfmt
+
+package fmtcmd
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/printer"
+)
+
+var (
+ ErrWriteStdin = errors.New("cannot use write option with standard input")
+)
+
+type Options struct {
+ List bool // list files whose formatting differs
+ Write bool // write result to (source) file instead of stdout
+ Diff bool // display diffs instead of rewriting files
+}
+
+func isValidFile(f os.FileInfo, extensions []string) bool {
+ if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") {
+ for _, ext := range extensions {
+ if strings.HasSuffix(f.Name(), "."+ext) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// If in == nil, the source is the contents of the file with the given filename.
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error {
+ if in == nil {
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ in = f
+ }
+
+ src, err := ioutil.ReadAll(in)
+ if err != nil {
+ return err
+ }
+
+ res, err := printer.Format(src)
+ if err != nil {
+ return err
+ }
+ // Files should end with newlines
+ res = append(res, []byte("\n")...)
+
+ if !bytes.Equal(src, res) {
+ // formatting has changed
+ if opts.List {
+ fmt.Fprintln(out, filename)
+ }
+ if opts.Write {
+ err = ioutil.WriteFile(filename, res, 0644)
+ if err != nil {
+ return err
+ }
+ }
+ if opts.Diff {
+ data, err := diff(src, res)
+ if err != nil {
+ return fmt.Errorf("computing diff: %s", err)
+ }
+ fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename)
+ out.Write(data)
+ }
+ }
+
+ if !opts.List && !opts.Write && !opts.Diff {
+ _, err = out.Write(res)
+ }
+
+ return err
+}
+
+func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error {
+ visitFile := func(path string, f os.FileInfo, err error) error {
+ if err == nil && isValidFile(f, extensions) {
+ err = processFile(path, nil, stdout, false, opts)
+ }
+ return err
+ }
+
+ return filepath.Walk(path, visitFile)
+}
+
+func Run(
+ paths, extensions []string,
+ stdin io.Reader,
+ stdout io.Writer,
+ opts Options,
+) error {
+ if len(paths) == 0 {
+ if opts.Write {
+ return ErrWriteStdin
+ }
+ if err := processFile("", stdin, stdout, true, opts); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ for _, path := range paths {
+ switch dir, err := os.Stat(path); {
+ case err != nil:
+ return err
+ case dir.IsDir():
+ if err := walkDir(path, extensions, stdout, opts); err != nil {
+ return err
+ }
+ default:
+ if err := processFile(path, nil, stdout, false, opts); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func diff(b1, b2 []byte) (data []byte, err error) {
+ f1, err := ioutil.TempFile("", "")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f1.Name())
+ defer f1.Close()
+
+ f2, err := ioutil.TempFile("", "")
+ if err != nil {
+ return
+ }
+ defer os.Remove(f2.Name())
+ defer f2.Close()
+
+ f1.Write(b1)
+ f2.Write(b2)
+
+ data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput()
+ if len(data) > 0 {
+ // diff exits with a non-zero status when the files don't match.
+ // Ignore that failure as long as we get output.
+ err = nil
+ }
+ return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
new file mode 100644
index 0000000000..b8cf5ee06a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go
@@ -0,0 +1,440 @@
+// +build -windows
+// TODO(jen20): These need fixing on Windows but fmt is not used right now
+// and red CI is making it harder to process other bugs, so ignore until
+// we get around to fixing them.
+
+package fmtcmd
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "sort"
+ "syscall"
+ "testing"
+
+ "github.com/hashicorp/hcl/testhelper"
+)
+
+var fixtureExtensions = []string{"hcl"}
+
+func init() {
+ sort.Sort(ByFilename(fixtures))
+}
+
+func TestIsValidFile(t *testing.T) {
+ const fixtureDir = "./test-fixtures"
+
+ cases := []struct {
+ Path string
+ Expected bool
+ }{
+ {"good.hcl", true},
+ {".hidden.ignore", false},
+ {"file.ignore", false},
+ {"dir.ignore", false},
+ }
+
+ for _, tc := range cases {
+ file, err := os.Stat(filepath.Join(fixtureDir, tc.Path))
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ if res := isValidFile(file, fixtureExtensions); res != tc.Expected {
+ t.Errorf("want: %b, got: %b", tc.Expected, res)
+ }
+ }
+}
+
+func TestRunMultiplePaths(t *testing.T) {
+ path1, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path1)
+ path2, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path2)
+
+ var expectedOut bytes.Buffer
+ for _, path := range []string{path1, path2} {
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+ }
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path1, path2},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunSubDirectories(t *testing.T) {
+ pathParent, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(pathParent)
+
+ path1, err := renderFixtures(pathParent)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ path2, err := renderFixtures(pathParent)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ paths := []string{path1, path2}
+ sort.Strings(paths)
+
+ var expectedOut bytes.Buffer
+ for _, path := range paths {
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(filepath.Join(path, fixture.filename) + "\n")
+ }
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{pathParent},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunStdin(t *testing.T) {
+ var expectedOut bytes.Buffer
+ for i, fixture := range fixtures {
+ if i != 0 {
+ expectedOut.WriteString("\n")
+ }
+ expectedOut.Write(fixture.golden)
+ }
+
+ stdin, stdout := mockIO()
+ for _, fixture := range fixtures {
+ stdin.Write(fixture.input)
+ }
+
+ err := Run(
+ []string{},
+ fixtureExtensions,
+ stdin, stdout,
+ Options{},
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunStdinAndWrite(t *testing.T) {
+ var expectedOut = []byte{}
+
+ stdin, stdout := mockIO()
+ stdin.WriteString("")
+ err := Run(
+ []string{}, []string{},
+ stdin, stdout,
+ Options{
+ Write: true,
+ },
+ )
+
+ if err != ErrWriteStdin {
+ t.Errorf("error want:\n%s\ngot:\n%s", ErrWriteStdin, err)
+ }
+ if !bytes.Equal(stdout.Bytes(), expectedOut) {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunFileError(t *testing.T) {
+ path, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+ filename := filepath.Join(path, "unreadable.hcl")
+
+ var expectedError = &os.PathError{
+ Op: "open",
+ Path: filename,
+ Err: syscall.EACCES,
+ }
+
+ err = ioutil.WriteFile(filename, []byte{}, 0000)
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{},
+ )
+
+ if !reflect.DeepEqual(err, expectedError) {
+ t.Errorf("error want: %#v, got: %#v", expectedError, err)
+ }
+}
+
+func TestRunNoOptions(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ expectedOut.Write(fixture.golden)
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{},
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunList(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ if !bytes.Equal(fixture.golden, fixture.input) {
+ expectedOut.WriteString(fmt.Sprintln(filepath.Join(path, fixture.filename)))
+ }
+ }
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ List: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if stdout.String() != expectedOut.String() {
+ t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout)
+ }
+}
+
+func TestRunWrite(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ Write: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ for _, fixture := range fixtures {
+ res, err := ioutil.ReadFile(filepath.Join(path, fixture.filename))
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !bytes.Equal(res, fixture.golden) {
+ t.Errorf("file %q contents want:\n%s\ngot:\n%s", fixture.filename, fixture.golden, res)
+ }
+ }
+}
+
+func TestRunDiff(t *testing.T) {
+ path, err := renderFixtures("")
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ defer os.RemoveAll(path)
+
+ var expectedOut bytes.Buffer
+ for _, fixture := range fixtures {
+ if len(fixture.diff) > 0 {
+ expectedOut.WriteString(
+ regexp.QuoteMeta(
+ fmt.Sprintf("diff a/%s/%s b/%s/%s\n", path, fixture.filename, path, fixture.filename),
+ ),
+ )
+ // Need to use regex to ignore datetimes in diff.
+ expectedOut.WriteString(`--- .+?\n`)
+ expectedOut.WriteString(`\+\+\+ .+?\n`)
+ expectedOut.WriteString(regexp.QuoteMeta(string(fixture.diff)))
+ }
+ }
+
+ expectedOutString := testhelper.Unix2dos(expectedOut.String())
+
+ _, stdout := mockIO()
+ err = Run(
+ []string{path},
+ fixtureExtensions,
+ nil, stdout,
+ Options{
+ Diff: true,
+ },
+ )
+
+ if err != nil {
+ t.Errorf("unexpected error: %s", err)
+ }
+ if !regexp.MustCompile(expectedOutString).Match(stdout.Bytes()) {
+ t.Errorf("stdout want match:\n%s\ngot:\n%q", expectedOutString, stdout)
+ }
+}
+
+func mockIO() (stdin, stdout *bytes.Buffer) {
+ return new(bytes.Buffer), new(bytes.Buffer)
+}
+
+type fixture struct {
+ filename string
+ input, golden, diff []byte
+}
+
+type ByFilename []fixture
+
+func (s ByFilename) Len() int { return len(s) }
+func (s ByFilename) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s ByFilename) Less(i, j int) bool { return len(s[i].filename) > len(s[j].filename) }
+
+var fixtures = []fixture{
+ {
+ "noop.hcl",
+ []byte(`resource "aws_security_group" "firewall" {
+ count = 5
+}
+`),
+ []byte(`resource "aws_security_group" "firewall" {
+ count = 5
+}
+`),
+ []byte(``),
+ }, {
+ "align_equals.hcl",
+ []byte(`variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+`),
+ []byte(`variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+`),
+ []byte(`@@ -1,4 +1,4 @@
+ variable "foo" {
+- default = "bar"
++ default = "bar"
+ description = "bar"
+ }
+`),
+ }, {
+ "indentation.hcl",
+ []byte(`provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+`),
+ []byte(`provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+`),
+ []byte(`@@ -1,4 +1,4 @@
+ provider "aws" {
+- access_key = "foo"
+- secret_key = "bar"
++ access_key = "foo"
++ secret_key = "bar"
+ }
+`),
+ },
+}
+
+// parent can be an empty string, in which case the system's default
+// temporary directory will be used.
+func renderFixtures(parent string) (path string, err error) {
+ path, err = ioutil.TempDir(parent, "")
+ if err != nil {
+ return "", err
+ }
+
+ for _, fixture := range fixtures {
+ err = ioutil.WriteFile(filepath.Join(path, fixture.filename), []byte(fixture.input), 0644)
+ if err != nil {
+ os.RemoveAll(path)
+ return "", err
+ }
+ }
+
+ return path, nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
new file mode 100644
index 0000000000..9977a2836c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/.hidden.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/dir.ignore
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
new file mode 100644
index 0000000000..9977a2836c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/file.ignore
@@ -0,0 +1 @@
+invalid
diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/test-fixtures/good.hcl
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 0000000000..5c99381dfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
new file mode 100644
index 0000000000..32399fec5d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error_test.go
@@ -0,0 +1,9 @@
+package parser
+
+import (
+ "testing"
+)
+
+func TestPosError_impl(t *testing.T) {
+ var _ error = new(PosError)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 0000000000..cc129b6c7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,454 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse returns the fully parsed source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+ // we don't return a nil node, because might want to use already
+ // collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+ // count the endline if it's multiline comment, ie starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " "))
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+ // assignment or object only, but not nested objects. this is not
+ // allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ // object
+ return keys, nil
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ fmt.Println("illegal")
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+	// not a RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // If there is no error, we should be at a RBRACE to end the object
+ if p.tok.Type != token.RBRACE {
+ return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type)
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ if needComma {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA),
+ }
+ }
+
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ // Otherwise read the next token from the scanner and Save it to the buffer
+ // in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
new file mode 100644
index 0000000000..2ef830faa2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser_test.go
@@ -0,0 +1,342 @@
+package parser
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestType(t *testing.T) {
+ var literals = []struct {
+ typ token.Type
+ src string
+ }{
+ {token.STRING, `foo = "foo"`},
+ {token.NUMBER, `foo = 123`},
+ {token.NUMBER, `foo = -29`},
+ {token.FLOAT, `foo = 123.12`},
+ {token.FLOAT, `foo = -123.12`},
+ {token.BOOL, `foo = true`},
+ {token.HEREDOC, "foo = < 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+ // check if we have adjacent one liner items. If yes we'll going to align
+ // the comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+			// one means a oneliner without any lead comment
+ // two means a oneliner with lead comment
+ // anything else might be something else
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ // put newlines if the items are between other non aligned items.
+ // newlines are also added if there is a standalone comment already, so
+ // check it too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ var buf bytes.Buffer
+ buf.WriteString("[")
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ insertSpaceBeforeItem := false
+ for i, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ // multiline list, add newline before we add each item
+ buf.WriteByte(newline)
+ insertSpaceBeforeItem = false
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+ buf.WriteString(",")
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ if i == len(l.List)-1 {
+ buf.WriteByte(newline)
+ }
+ } else {
+ if insertSpaceBeforeItem {
+ buf.WriteByte(blank)
+ insertSpaceBeforeItem = false
+ }
+ buf.Write(p.output(item))
+ if i != len(l.List)-1 {
+ buf.WriteString(",")
+ insertSpaceBeforeItem = true
+ }
+ }
+
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent indents the lines of the given buffer for each non-empty line
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks all the 2nd and further lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 0000000000..fb9df58d4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,64 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
new file mode 100644
index 0000000000..6e1c7ceb5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
@@ -0,0 +1,149 @@
+// +build -windows
+// TODO(jen20): These need fixing on Windows but printer is not used right now
+// and red CI is making it harder to process other bugs, so ignore until
+// we get around to fixing them.
+
+package printer
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+const (
+ dataDir = "testdata"
+)
+
+type entry struct {
+ source, golden string
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+ {"complexhcl.input", "complexhcl.golden"},
+ {"list.input", "list.golden"},
+ {"comment.input", "comment.golden"},
+ {"comment_aligned.input", "comment_aligned.golden"},
+ {"comment_standalone.input", "comment_standalone.golden"},
+ {"empty_block.input", "empty_block.golden"},
+}
+
+func TestFiles(t *testing.T) {
+ for _, e := range data {
+ source := filepath.Join(dataDir, e.source)
+ golden := filepath.Join(dataDir, e.golden)
+ check(t, source, golden)
+ }
+}
+
+func check(t *testing.T, source, golden string) {
+ src, err := ioutil.ReadFile(source)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ res, err := format(src)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // update golden files if necessary
+ if *update {
+ if err := ioutil.WriteFile(golden, res, 0644); err != nil {
+ t.Error(err)
+ }
+ return
+ }
+
+ // get golden
+ gld, err := ioutil.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // formatted source and golden must be the same
+ if err := diff(source, golden, res, gld); err != nil {
+ t.Error(err)
+ return
+ }
+}
+
+// diff compares a and b.
+func diff(aname, bname string, a, b []byte) error {
+ var buf bytes.Buffer // holding long error message
+
+ // compare lengths
+ if len(a) != len(b) {
+ fmt.Fprintf(&buf, "\nlength changed: len(%s) = %d, len(%s) = %d", aname, len(a), bname, len(b))
+ }
+
+ // compare contents
+ line := 1
+ offs := 1
+ for i := 0; i < len(a) && i < len(b); i++ {
+ ch := a[i]
+ if ch != b[i] {
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", aname, line, i-offs+1, lineAt(a, offs))
+ fmt.Fprintf(&buf, "\n%s:%d:%d: %s", bname, line, i-offs+1, lineAt(b, offs))
+ fmt.Fprintf(&buf, "\n\n")
+ break
+ }
+ if ch == '\n' {
+ line++
+ offs = i + 1
+ }
+ }
+
+ if buf.Len() > 0 {
+ return errors.New(buf.String())
+ }
+ return nil
+}
+
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any.
+func format(src []byte) ([]byte, error) {
+ // parse src
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ }
+
+ var buf bytes.Buffer
+
+ cfg := &Config{}
+ if err := cfg.Fprint(&buf, node); err != nil {
+ return nil, fmt.Errorf("print: %s", err)
+ }
+
+ // make sure formatted output is syntactically correct
+ res := buf.Bytes()
+
+ if _, err := parser.Parse(src); err != nil {
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ }
+
+ return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+ i := offs
+ for i < len(text) && text[i] != '\n' {
+ i++
+ }
+ return text[offs:i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
new file mode 100644
index 0000000000..e86215f53f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
@@ -0,0 +1,36 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+// This comes from Terraform, as a test
+variable "foo" {
+ # Standalone comment should be still here
+
+ default = "bar"
+ description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = ["fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1, 2] // another line here
+
+# Another comment
+variable = {
+ description = "bar" # another yooo
+
+ foo {
+ # Nested standalone
+
+ bar = "fatih"
+ }
+}
+
+// lead comment
+foo {
+ bar = "fatih" // line comment 2
+} // line comment 3
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
new file mode 100644
index 0000000000..57c37ac1de
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
@@ -0,0 +1,37 @@
+// A standalone comment is a comment which is not attached to any kind of node
+
+ // This comes from Terraform, as a test
+variable "foo" {
+ # Standalone comment should be still here
+
+ default = "bar"
+ description = "bar" # yooo
+}
+
+/* This is a multi line standalone
+comment*/
+
+
+// fatih arslan
+/* This is a developer test
+account and a multine comment */
+developer = [ "fatih", "arslan"] // fatih arslan
+
+# One line here
+numbers = [1,2] // another line here
+
+ # Another comment
+variable = {
+ description = "bar" # another yooo
+ foo {
+ # Nested standalone
+
+ bar = "fatih"
+ }
+}
+
+ // lead comment
+foo {
+ bar = "fatih" // line comment 2
+} // line comment 3
+
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
new file mode 100644
index 0000000000..e8469e5c4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.golden
@@ -0,0 +1,32 @@
+aligned {
+ # We have some aligned items below
+ foo = "fatih" # yoo1
+ default = "bar" # yoo2
+ bar = "bar and foo" # yoo3
+
+ default = {
+ bar = "example"
+ }
+
+ #deneme arslan
+ fatih = ["fatih"] # yoo4
+
+ #fatih arslan
+ fatiharslan = ["arslan"] // yoo5
+
+ default = {
+ bar = "example"
+ }
+
+ security_groups = [
+ "foo", # kenya 1
+ "${aws_security_group.firewall.foo}", # kenya 2
+ ]
+
+ security_groups2 = [
+ "foo", # kenya 1
+ "bar", # kenya 1.5
+ "${aws_security_group.firewall.foo}", # kenya 2
+ "foobar", # kenya 3
+ ]
+}
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
new file mode 100644
index 0000000000..bd43ab1adc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_aligned.input
@@ -0,0 +1,28 @@
+aligned {
+# We have some aligned items below
+ foo = "fatih" # yoo1
+ default = "bar" # yoo2
+ bar = "bar and foo" # yoo3
+ default = {
+ bar = "example"
+ }
+ #deneme arslan
+ fatih = ["fatih"] # yoo4
+ #fatih arslan
+ fatiharslan = ["arslan"] // yoo5
+ default = {
+ bar = "example"
+ }
+
+security_groups = [
+ "foo", # kenya 1
+ "${aws_security_group.firewall.foo}", # kenya 2
+]
+
+security_groups2 = [
+ "foo", # kenya 1
+ "bar", # kenya 1.5
+ "${aws_security_group.firewall.foo}", # kenya 2
+ "foobar", # kenya 3
+]
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden
new file mode 100644
index 0000000000..962dbf2b36
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.golden
@@ -0,0 +1,16 @@
+// A standalone comment
+
+aligned {
+ # Standalone 1
+
+ a = "bar" # yoo1
+ default = "bar" # yoo2
+
+ # Standalone 2
+}
+
+# Standalone 3
+
+numbers = [1, 2] // another line here
+
+# Standalone 4
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input
new file mode 100644
index 0000000000..4436cb16c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_standalone.input
@@ -0,0 +1,16 @@
+// A standalone comment
+
+aligned {
+ # Standalone 1
+
+ a = "bar" # yoo1
+ default = "bar" # yoo2
+
+ # Standalone 2
+}
+
+ # Standalone 3
+
+numbers = [1,2] // another line here
+
+ # Standalone 4
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden
new file mode 100644
index 0000000000..b733a27e46
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/complexhcl.golden
@@ -0,0 +1,54 @@
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+developer = ["fatih", "arslan"]
+
+provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+
+provider "do" {
+ api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+ count = 5
+}
+
+resource aws_instance "web" {
+ ami = "${var.foo}"
+
+ security_groups = [
+ "foo",
+ "${aws_security_group.firewall.foo}",
+ ]
+
+ network_interface {
+ device_index = 0
+ description = "Main network interface"
+ }
+
+ network_interface = {
+ device_index = 1
+
+ description = < 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+	// be sure we get the character after /* This allows us to find comments
+	// that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+ // now it's either something like: 0421(octal) or 0.1231(float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+		// hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base for n times. For example an
+// octal notation \123 would yield scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err prints the error of any scanning to s.Error function. If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal number
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is an hexadecimal number
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal,decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
new file mode 100644
index 0000000000..3c6dd32170
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go
@@ -0,0 +1,554 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+ tok token.Type
+ text string
+}
+
+var tokenLists = map[string][]tokenPair{
+ "comment": []tokenPair{
+ {token.COMMENT, "//"},
+ {token.COMMENT, "////"},
+ {token.COMMENT, "// comment"},
+ {token.COMMENT, "// /* comment */"},
+ {token.COMMENT, "// // comment //"},
+ {token.COMMENT, "//" + f100},
+ {token.COMMENT, "#"},
+ {token.COMMENT, "##"},
+ {token.COMMENT, "# comment"},
+ {token.COMMENT, "# /* comment */"},
+ {token.COMMENT, "# # comment #"},
+ {token.COMMENT, "#" + f100},
+ {token.COMMENT, "/**/"},
+ {token.COMMENT, "/***/"},
+ {token.COMMENT, "/* comment */"},
+ {token.COMMENT, "/* // comment */"},
+ {token.COMMENT, "/* /* comment */"},
+ {token.COMMENT, "/*\n comment\n*/"},
+ {token.COMMENT, "/*" + f100 + "*/"},
+ },
+ "operator": []tokenPair{
+ {token.LBRACK, "["},
+ {token.LBRACE, "{"},
+ {token.COMMA, ","},
+ {token.PERIOD, "."},
+ {token.RBRACK, "]"},
+ {token.RBRACE, "}"},
+ {token.ASSIGN, "="},
+ {token.ADD, "+"},
+ {token.SUB, "-"},
+ },
+ "bool": []tokenPair{
+ {token.BOOL, "true"},
+ {token.BOOL, "false"},
+ },
+ "ident": []tokenPair{
+ {token.IDENT, "a"},
+ {token.IDENT, "a0"},
+ {token.IDENT, "foobar"},
+ {token.IDENT, "foo-bar"},
+ {token.IDENT, "abc123"},
+ {token.IDENT, "LGTM"},
+ {token.IDENT, "_"},
+ {token.IDENT, "_abc123"},
+ {token.IDENT, "abc123_"},
+ {token.IDENT, "_abc_123_"},
+ {token.IDENT, "_äöü"},
+ {token.IDENT, "_本"},
+ {token.IDENT, "äöü"},
+ {token.IDENT, "本"},
+ {token.IDENT, "a۰۱۸"},
+ {token.IDENT, "foo६४"},
+ {token.IDENT, "bar9876"},
+ },
+ "heredoc": []tokenPair{
+ {token.HEREDOC, "< 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+	// create artificial source code
+ buf := new(bytes.Buffer)
+ for _, ident := range tokenList {
+ fmt.Fprintf(buf, "%s\n", ident.text)
+ }
+
+ s := New(buf.Bytes())
+ for _, ident := range tokenList {
+ tok := s.Scan()
+ if tok.Type != ident.tok {
+ t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+ }
+
+ if tok.Text != ident.text {
+ t.Errorf("text = %q want %q", tok.String(), ident.text)
+ }
+
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 0000000000..e87ac63563
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,245 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a single-quoted, double-quoted,
+// or backquoted Go string literal, returning the string value
+// that s quotes. (If s is single-quoted, it would be a Go
+// character literal; Unquote returns the corresponding
+// one-character string.)
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section, except for escaped quotes, which we handle specifically.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ // We special case escaped double quotes in interpolations, converting
+ // them to straight double quotes.
+ if r == '\\' {
+ if q, _ := utf8.DecodeRuneInString(s); q == '"' {
+ continue
+ }
+ }
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
new file mode 100644
index 0000000000..4a810aa38a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote_test.go
@@ -0,0 +1,93 @@
+package strconv
+
+import "testing"
+
+type quoteTest struct {
+ in string
+ out string
+ ascii string
+}
+
+var quotetests = []quoteTest{
+ {"\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`, `"\a\b\f\r\n\t\v"`},
+ {"\\", `"\\"`, `"\\"`},
+ {"abc\xffdef", `"abc\xffdef"`, `"abc\xffdef"`},
+ {"\u263a", `"☺"`, `"\u263a"`},
+ {"\U0010ffff", `"\U0010ffff"`, `"\U0010ffff"`},
+ {"\x04", `"\x04"`, `"\x04"`},
+}
+
+type unQuoteTest struct {
+ in string
+ out string
+}
+
+var unquotetests = []unQuoteTest{
+ {`""`, ""},
+ {`"a"`, "a"},
+ {`"abc"`, "abc"},
+ {`"☺"`, "☺"},
+ {`"hello world"`, "hello world"},
+ {`"\xFF"`, "\xFF"},
+ {`"\377"`, "\377"},
+ {`"\u1234"`, "\u1234"},
+ {`"\U00010111"`, "\U00010111"},
+ {`"\U0001011111"`, "\U0001011111"},
+ {`"\a\b\f\n\r\t\v\\\""`, "\a\b\f\n\r\t\v\\\""},
+ {`"'"`, "'"},
+ {`"${file("foo")}"`, `${file("foo")}`},
+ {`"${file(\"foo\")}"`, `${file("foo")}`},
+ {`"echo ${var.region}${element(split(",",var.zones),0)}"`,
+ `echo ${var.region}${element(split(",",var.zones),0)}`},
+}
+
+var misquoted = []string{
+ ``,
+ `"`,
+ `"a`,
+ `"'`,
+ `b"`,
+ `"\"`,
+ `"\9"`,
+ `"\19"`,
+ `"\129"`,
+ `'\'`,
+ `'\9'`,
+ `'\19'`,
+ `'\129'`,
+ `'ab'`,
+ `"\x1!"`,
+ `"\U12345678"`,
+ `"\z"`,
+ "`",
+ "`xxx",
+ "`\"",
+ `"\'"`,
+ `'\"'`,
+ "\"\n\"",
+ "\"\\n\n\"",
+ "'\n'",
+ `"${"`,
+ `"${foo{}"`,
+}
+
+func TestUnquote(t *testing.T) {
+ for _, tt := range unquotetests {
+ if out, err := Unquote(tt.in); err != nil || out != tt.out {
+ t.Errorf("Unquote(%#q) = %q, %v want %q, nil", tt.in, out, err, tt.out)
+ }
+ }
+
+ // run the quote tests too, backward
+ for _, tt := range quotetests {
+ if in, err := Unquote(tt.out); in != tt.in {
+ t.Errorf("Unquote(%#q) = %q, %v, want %q, nil", tt.out, in, err, tt.in)
+ }
+ }
+
+ for _, s := range misquoted {
+ if out, err := Unquote(s); out != "" || err != ErrSyntax {
+ t.Errorf("Unquote(%#q) = %q, %v want %q, %v", s, out, err, "", ErrSyntax)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
new file mode 100644
index 0000000000..78c2675823
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/array_comment.hcl
@@ -0,0 +1,4 @@
+foo = [
+ "1",
+ "2", # comment
+]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
new file mode 100644
index 0000000000..eb5a99a694
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_colon.hcl
@@ -0,0 +1,6 @@
+resource = [{
+ "foo": {
+ "bar": {},
+ "baz": [1, 2, "foo"],
+ }
+}]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl
new file mode 100644
index 0000000000..dd3151cb7d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/assign_deep.hcl
@@ -0,0 +1,5 @@
+resource = [{
+ foo = [{
+ bar = {}
+ }]
+}]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
new file mode 100644
index 0000000000..1ff7f29fd2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment.hcl
@@ -0,0 +1,15 @@
+// Foo
+
+/* Bar */
+
+/*
+/*
+Baz
+*/
+
+# Another
+
+# Multiple
+# Lines
+
+foo = "bar"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
new file mode 100644
index 0000000000..fec56017dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/comment_single.hcl
@@ -0,0 +1 @@
+# Hello
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
new file mode 100644
index 0000000000..cccb5b06fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex.hcl
@@ -0,0 +1,42 @@
+// This comes from Terraform, as a test
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+provider "aws" {
+ access_key = "foo"
+ secret_key = "bar"
+}
+
+provider "do" {
+ api_key = "${var.foo}"
+}
+
+resource "aws_security_group" "firewall" {
+ count = 5
+}
+
+resource aws_instance "web" {
+ ami = "${var.foo}"
+ security_groups = [
+ "foo",
+ "${aws_security_group.firewall.foo}"
+ ]
+
+ network_interface {
+ device_index = 0
+ description = "Main network interface"
+ }
+}
+
+resource "aws_instance" "db" {
+ security_groups = "${aws_security_group.firewall.*.id}"
+ VPC = "foo"
+
+ depends_on = ["aws_instance.web"]
+}
+
+output "web_ip" {
+ value = "${aws_instance.web.private_ip}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
new file mode 100644
index 0000000000..0007aaf5f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/complex_key.hcl
@@ -0,0 +1 @@
+foo.bar = "baz"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/empty.hcl
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
new file mode 100644
index 0000000000..059d4ce65b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo"]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
new file mode 100644
index 0000000000..50f4218ac8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/list_comma.hcl
@@ -0,0 +1 @@
+foo = [1, 2, "foo",]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
new file mode 100644
index 0000000000..029c54b0ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/multiple.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+key = 7
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
new file mode 100644
index 0000000000..e9f77cae90
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/old.hcl
@@ -0,0 +1,3 @@
+default = {
+ "eu-west-1": "ami-b1cf19c6",
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
new file mode 100644
index 0000000000..92592fbb3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure.hcl
@@ -0,0 +1,5 @@
+// This is a test structure for the lexer
+foo bar "baz" {
+ key = 7
+ foo = "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
new file mode 100644
index 0000000000..7229a1f012
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_basic.hcl
@@ -0,0 +1,5 @@
+foo {
+ value = 7
+ "value" = 8
+ "complex::value" = 9
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
new file mode 100644
index 0000000000..4d156ddea9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/structure_empty.hcl
@@ -0,0 +1 @@
+resource "foo" "bar" {}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
new file mode 100644
index 0000000000..cf2747ea1a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/test-fixtures/types.hcl
@@ -0,0 +1,7 @@
+foo = "bar"
+bar = 7
+baz = [1,2,3]
+foo = -12
+bar = 3.14159
+foo = true
+bar = false
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 0000000000..59c1bb72d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 0000000000..6e99498040
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,214 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // < 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+ // Great! We have a match go through all the items and flatten
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 0000000000..65d56c9b85
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,297 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+		// we don't return a nil node, because we might want to use the
+		// already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ fmt.Println("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of object value, such as number, bool, string,
+// object or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root node, which in JSON must be an object
+// (i.e. start with LBRACE).
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+ // if we hit RBRACE, we are good to go (means we parsed all Items), if it's
+	// not a RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go
new file mode 100644
index 0000000000..8c66fb9ca5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser_test.go
@@ -0,0 +1,338 @@
+package parser
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "testing"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+func TestType(t *testing.T) {
+ var literals = []struct {
+ typ token.Type
+ src string
+ }{
+ {token.STRING, `"foo": "bar"`},
+ {token.NUMBER, `"foo": 123`},
+ {token.FLOAT, `"foo": 123.12`},
+ {token.FLOAT, `"foo": -123.12`},
+ {token.BOOL, `"foo": true`},
+ {token.STRING, `"foo": null`},
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing: %s", l.src)
+
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ lit, ok := item.Val.(*ast.LiteralType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ if lit.Token.Type != l.typ {
+ t.Errorf("want: %s, got: %s", l.typ, lit.Token.Type)
+ }
+ }
+}
+
+func TestListType(t *testing.T) {
+ var literals = []struct {
+ src string
+ tokens []token.Type
+ }{
+ {
+ `"foo": ["123", 123]`,
+ []token.Type{token.STRING, token.NUMBER},
+ },
+ {
+ `"foo": [123, "123",]`,
+ []token.Type{token.NUMBER, token.STRING},
+ },
+ {
+ `"foo": []`,
+ []token.Type{},
+ },
+ {
+ `"foo": ["123", 123]`,
+ []token.Type{token.STRING, token.NUMBER},
+ },
+ {
+ `"foo": ["123", {}]`,
+ []token.Type{token.STRING, token.LBRACE},
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing: %s", l.src)
+
+ p := newParser([]byte(l.src))
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ list, ok := item.Val.(*ast.ListType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ tokens := []token.Type{}
+ for _, li := range list.List {
+ switch v := li.(type) {
+ case *ast.LiteralType:
+ tokens = append(tokens, v.Token.Type)
+ case *ast.ObjectType:
+ tokens = append(tokens, token.LBRACE)
+ }
+ }
+
+ equals(t, l.tokens, tokens)
+ }
+}
+
+func TestObjectType(t *testing.T) {
+ var literals = []struct {
+ src string
+ nodeType []ast.Node
+ itemLen int
+ }{
+ {
+ `"foo": {}`,
+ nil,
+ 0,
+ },
+ {
+ `"foo": {
+ "bar": "fatih"
+ }`,
+ []ast.Node{&ast.LiteralType{}},
+ 1,
+ },
+ {
+ `"foo": {
+ "bar": "fatih",
+ "baz": ["arslan"]
+ }`,
+ []ast.Node{
+ &ast.LiteralType{},
+ &ast.ListType{},
+ },
+ 2,
+ },
+ {
+ `"foo": {
+ "bar": {}
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ },
+ 1,
+ },
+ {
+ `"foo": {
+ "bar": {},
+ "foo": true
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ &ast.LiteralType{},
+ },
+ 2,
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing:\n%s\n", l.src)
+
+ p := newParser([]byte(l.src))
+ // p.enableTrace = true
+ item, err := p.objectItem()
+ if err != nil {
+ t.Error(err)
+ }
+
+ // we know that the ObjectKey name is foo for all cases, what matters
+ // is the object
+ obj, ok := item.Val.(*ast.ObjectType)
+ if !ok {
+ t.Errorf("node should be of type LiteralType, got: %T", item.Val)
+ }
+
+ // check if the total length of items are correct
+ equals(t, l.itemLen, len(obj.List.Items))
+
+ // check if the types are correct
+ for i, item := range obj.List.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ }
+ }
+}
+
+func TestFlattenObjects(t *testing.T) {
+ var literals = []struct {
+ src string
+ nodeType []ast.Node
+ itemLen int
+ }{
+ {
+ `{
+ "foo": [
+ {
+ "foo": "svh",
+ "bar": "fatih"
+ }
+ ]
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ &ast.LiteralType{},
+ &ast.LiteralType{},
+ },
+ 3,
+ },
+ {
+ `{
+ "variable": {
+ "foo": {}
+ }
+ }`,
+ []ast.Node{
+ &ast.ObjectType{},
+ },
+ 1,
+ },
+ }
+
+ for _, l := range literals {
+ t.Logf("Testing:\n%s\n", l.src)
+
+ f, err := Parse([]byte(l.src))
+ if err != nil {
+ t.Error(err)
+ }
+
+ // the first object is always an ObjectList so just assert that one
+ // so we can use it as such
+ obj, ok := f.Node.(*ast.ObjectList)
+ if !ok {
+ t.Errorf("node should be *ast.ObjectList, got: %T", f.Node)
+ }
+
+ // check if the types are correct
+ var i int
+ for _, item := range obj.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ i++
+
+ if obj, ok := item.Val.(*ast.ObjectType); ok {
+ for _, item := range obj.List.Items {
+ equals(t, reflect.TypeOf(l.nodeType[i]), reflect.TypeOf(item.Val))
+ i++
+ }
+ }
+ }
+
+ // check if the number of items is correct
+ equals(t, l.itemLen, i)
+
+ }
+}
+
+func TestObjectKey(t *testing.T) {
+ keys := []struct {
+ exp []token.Type
+ src string
+ }{
+ {[]token.Type{token.STRING}, `"foo": {}`},
+ }
+
+ for _, k := range keys {
+ p := newParser([]byte(k.src))
+ keys, err := p.objectKey()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tokens := []token.Type{}
+ for _, o := range keys {
+ tokens = append(tokens, o.Token.Type)
+ }
+
+ equals(t, k.exp, tokens)
+ }
+
+ errKeys := []struct {
+ src string
+ }{
+ {`foo 12 {}`},
+ {`foo bar = {}`},
+ {`foo []`},
+ {`12 {}`},
+ }
+
+ for _, k := range errKeys {
+ p := newParser([]byte(k.src))
+ _, err := p.objectKey()
+ if err == nil {
+ t.Errorf("case '%s' should give an error", k.src)
+ }
+ }
+}
+
+// Official HCL tests
+func TestParse(t *testing.T) {
+ cases := []struct {
+ Name string
+ Err bool
+ }{
+ {
+ "array.json",
+ false,
+ },
+ {
+ "basic.json",
+ false,
+ },
+ {
+ "object.json",
+ false,
+ },
+ {
+ "types.json",
+ false,
+ },
+ }
+
+ const fixtureDir = "./test-fixtures"
+
+ for _, tc := range cases {
+ d, err := ioutil.ReadFile(filepath.Join(fixtureDir, tc.Name))
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ _, err = Parse(d)
+ if (err != nil) != tc.Err {
+ t.Fatalf("Input: %s\n\nError: %s", tc.Name, err)
+ }
+ }
+}
+
+// equals fails the test if exp is not equal to act.
+func equals(tb testing.TB, exp, act interface{}) {
+ if !reflect.DeepEqual(exp, act) {
+ _, file, line, _ := runtime.Caller(1)
+ fmt.Printf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act)
+ tb.FailNow()
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json
new file mode 100644
index 0000000000..e320f17ab2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+ "foo": [1, 2, "bar"],
+ "bar": "baz"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json
new file mode 100644
index 0000000000..b54bde96c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+ "foo": "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json
new file mode 100644
index 0000000000..72168a3ccb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+ "foo": {
+ "bar": [1,2]
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json
new file mode 100644
index 0000000000..9a142a6ca6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+ "foo": "bar",
+ "bar": 7,
+ "baz": [1,2,3],
+ "foo": -12,
+ "bar": 3.14159,
+ "foo": true,
+ "bar": false,
+ "foo": null
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 0000000000..477f71ff3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+ // even though we accept a src, we read from a io.Reader compatible type
+ // (*bytes.Buffer). So in the future we might easily change it to streaming
+ // read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+ // token position, initial next() is moving the offset by one(size of rune
+ // actually), though we are interested with the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
+
+// scanNumber scans a HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ // If we have a larger number and this is zero, error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+ // If we're going into a ${} then we can ignore quotes for awhile
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+		// hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example an
+// octal escape like \123 would use scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err prints the error of any scanning to s.Error function. If the function is
+// not defined, by default it prints them to os.Stderr
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal,decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
new file mode 100644
index 0000000000..fe2d75524d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner_test.go
@@ -0,0 +1,363 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+var f100 = "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
+
+type tokenPair struct {
+ tok token.Type
+ text string
+}
+
+var tokenLists = map[string][]tokenPair{
+ "operator": []tokenPair{
+ {token.LBRACK, "["},
+ {token.LBRACE, "{"},
+ {token.COMMA, ","},
+ {token.PERIOD, "."},
+ {token.RBRACK, "]"},
+ {token.RBRACE, "}"},
+ },
+ "bool": []tokenPair{
+ {token.BOOL, "true"},
+ {token.BOOL, "false"},
+ },
+ "string": []tokenPair{
+ {token.STRING, `" "`},
+ {token.STRING, `"a"`},
+ {token.STRING, `"本"`},
+ {token.STRING, `"${file("foo")}"`},
+ {token.STRING, `"${file(\"foo\")}"`},
+ {token.STRING, `"\a"`},
+ {token.STRING, `"\b"`},
+ {token.STRING, `"\f"`},
+ {token.STRING, `"\n"`},
+ {token.STRING, `"\r"`},
+ {token.STRING, `"\t"`},
+ {token.STRING, `"\v"`},
+ {token.STRING, `"\""`},
+ {token.STRING, `"\000"`},
+ {token.STRING, `"\777"`},
+ {token.STRING, `"\x00"`},
+ {token.STRING, `"\xff"`},
+ {token.STRING, `"\u0000"`},
+ {token.STRING, `"\ufA16"`},
+ {token.STRING, `"\U00000000"`},
+ {token.STRING, `"\U0000ffAB"`},
+ {token.STRING, `"` + f100 + `"`},
+ },
+ "number": []tokenPair{
+ {token.NUMBER, "0"},
+ {token.NUMBER, "1"},
+ {token.NUMBER, "9"},
+ {token.NUMBER, "42"},
+ {token.NUMBER, "1234567890"},
+ {token.NUMBER, "-0"},
+ {token.NUMBER, "-1"},
+ {token.NUMBER, "-9"},
+ {token.NUMBER, "-42"},
+ {token.NUMBER, "-1234567890"},
+ },
+ "float": []tokenPair{
+ {token.FLOAT, "0."},
+ {token.FLOAT, "1."},
+ {token.FLOAT, "42."},
+ {token.FLOAT, "01234567890."},
+ {token.FLOAT, ".0"},
+ {token.FLOAT, ".1"},
+ {token.FLOAT, ".42"},
+ {token.FLOAT, ".0123456789"},
+ {token.FLOAT, "0.0"},
+ {token.FLOAT, "1.0"},
+ {token.FLOAT, "42.0"},
+ {token.FLOAT, "01234567890.0"},
+ {token.FLOAT, "0e0"},
+ {token.FLOAT, "1e0"},
+ {token.FLOAT, "42e0"},
+ {token.FLOAT, "01234567890e0"},
+ {token.FLOAT, "0E0"},
+ {token.FLOAT, "1E0"},
+ {token.FLOAT, "42E0"},
+ {token.FLOAT, "01234567890E0"},
+ {token.FLOAT, "0e+10"},
+ {token.FLOAT, "1e-10"},
+ {token.FLOAT, "42e+10"},
+ {token.FLOAT, "01234567890e-10"},
+ {token.FLOAT, "0E+10"},
+ {token.FLOAT, "1E-10"},
+ {token.FLOAT, "42E+10"},
+ {token.FLOAT, "01234567890E-10"},
+ {token.FLOAT, "01.8e0"},
+ {token.FLOAT, "1.4e0"},
+ {token.FLOAT, "42.2e0"},
+ {token.FLOAT, "01234567890.12e0"},
+ {token.FLOAT, "0.E0"},
+ {token.FLOAT, "1.12E0"},
+ {token.FLOAT, "42.123E0"},
+ {token.FLOAT, "01234567890.213E0"},
+ {token.FLOAT, "0.2e+10"},
+ {token.FLOAT, "1.2e-10"},
+ {token.FLOAT, "42.54e+10"},
+ {token.FLOAT, "01234567890.98e-10"},
+ {token.FLOAT, "0.1E+10"},
+ {token.FLOAT, "1.1E-10"},
+ {token.FLOAT, "42.1E+10"},
+ {token.FLOAT, "01234567890.1E-10"},
+ {token.FLOAT, "-0.0"},
+ {token.FLOAT, "-1.0"},
+ {token.FLOAT, "-42.0"},
+ {token.FLOAT, "-01234567890.0"},
+ {token.FLOAT, "-0e0"},
+ {token.FLOAT, "-1e0"},
+ {token.FLOAT, "-42e0"},
+ {token.FLOAT, "-01234567890e0"},
+ {token.FLOAT, "-0E0"},
+ {token.FLOAT, "-1E0"},
+ {token.FLOAT, "-42E0"},
+ {token.FLOAT, "-01234567890E0"},
+ {token.FLOAT, "-0e+10"},
+ {token.FLOAT, "-1e-10"},
+ {token.FLOAT, "-42e+10"},
+ {token.FLOAT, "-01234567890e-10"},
+ {token.FLOAT, "-0E+10"},
+ {token.FLOAT, "-1E-10"},
+ {token.FLOAT, "-42E+10"},
+ {token.FLOAT, "-01234567890E-10"},
+ {token.FLOAT, "-01.8e0"},
+ {token.FLOAT, "-1.4e0"},
+ {token.FLOAT, "-42.2e0"},
+ {token.FLOAT, "-01234567890.12e0"},
+ {token.FLOAT, "-0.E0"},
+ {token.FLOAT, "-1.12E0"},
+ {token.FLOAT, "-42.123E0"},
+ {token.FLOAT, "-01234567890.213E0"},
+ {token.FLOAT, "-0.2e+10"},
+ {token.FLOAT, "-1.2e-10"},
+ {token.FLOAT, "-42.54e+10"},
+ {token.FLOAT, "-01234567890.98e-10"},
+ {token.FLOAT, "-0.1E+10"},
+ {token.FLOAT, "-1.1E-10"},
+ {token.FLOAT, "-42.1E+10"},
+ {token.FLOAT, "-01234567890.1E-10"},
+ },
+}
+
+var orderedTokenLists = []string{
+ "comment",
+ "operator",
+ "bool",
+ "string",
+ "number",
+ "float",
+}
+
+func TestPosition(t *testing.T) {
+	// create artificial source code
+ buf := new(bytes.Buffer)
+
+ for _, listName := range orderedTokenLists {
+ for _, ident := range tokenLists[listName] {
+ fmt.Fprintf(buf, "\t\t\t\t%s\n", ident.text)
+ }
+ }
+
+ s := New(buf.Bytes())
+
+ pos := token.Pos{"", 4, 1, 5}
+ s.Scan()
+ for _, listName := range orderedTokenLists {
+
+ for _, k := range tokenLists[listName] {
+ curPos := s.tokPos
+ // fmt.Printf("[%q] s = %+v:%+v\n", k.text, curPos.Offset, curPos.Column)
+
+ if curPos.Offset != pos.Offset {
+ t.Fatalf("offset = %d, want %d for %q", curPos.Offset, pos.Offset, k.text)
+ }
+ if curPos.Line != pos.Line {
+ t.Fatalf("line = %d, want %d for %q", curPos.Line, pos.Line, k.text)
+ }
+ if curPos.Column != pos.Column {
+ t.Fatalf("column = %d, want %d for %q", curPos.Column, pos.Column, k.text)
+ }
+ pos.Offset += 4 + len(k.text) + 1 // 4 tabs + token bytes + newline
+ pos.Line += countNewlines(k.text) + 1 // each token is on a new line
+
+ s.Error = func(pos token.Pos, msg string) {
+ t.Errorf("error %q for %q", msg, k.text)
+ }
+
+ s.Scan()
+ }
+ }
+ // make sure there were no token-internal errors reported by scanner
+ if s.ErrorCount != 0 {
+ t.Errorf("%d errors", s.ErrorCount)
+ }
+}
+
+func TestComment(t *testing.T) {
+ testTokenList(t, tokenLists["comment"])
+}
+
+func TestOperator(t *testing.T) {
+ testTokenList(t, tokenLists["operator"])
+}
+
+func TestBool(t *testing.T) {
+ testTokenList(t, tokenLists["bool"])
+}
+
+func TestIdent(t *testing.T) {
+ testTokenList(t, tokenLists["ident"])
+}
+
+func TestString(t *testing.T) {
+ testTokenList(t, tokenLists["string"])
+}
+
+func TestNumber(t *testing.T) {
+ testTokenList(t, tokenLists["number"])
+}
+
+func TestFloat(t *testing.T) {
+ testTokenList(t, tokenLists["float"])
+}
+
+func TestRealExample(t *testing.T) {
+ complexReal := `
+{
+ "variable": {
+ "foo": {
+ "default": "bar",
+ "description": "bar",
+ "depends_on": ["something"]
+ }
+ }
+}`
+
+ literals := []struct {
+ tokenType token.Type
+ literal string
+ }{
+ {token.LBRACE, `{`},
+ {token.STRING, `"variable"`},
+ {token.COLON, `:`},
+ {token.LBRACE, `{`},
+ {token.STRING, `"foo"`},
+ {token.COLON, `:`},
+ {token.LBRACE, `{`},
+ {token.STRING, `"default"`},
+ {token.COLON, `:`},
+ {token.STRING, `"bar"`},
+ {token.COMMA, `,`},
+ {token.STRING, `"description"`},
+ {token.COLON, `:`},
+ {token.STRING, `"bar"`},
+ {token.COMMA, `,`},
+ {token.STRING, `"depends_on"`},
+ {token.COLON, `:`},
+ {token.LBRACK, `[`},
+ {token.STRING, `"something"`},
+ {token.RBRACK, `]`},
+ {token.RBRACE, `}`},
+ {token.RBRACE, `}`},
+ {token.RBRACE, `}`},
+ {token.EOF, ``},
+ }
+
+ s := New([]byte(complexReal))
+ for _, l := range literals {
+ tok := s.Scan()
+ if l.tokenType != tok.Type {
+ t.Errorf("got: %s want %s for %s\n", tok, l.tokenType, tok.String())
+ }
+
+ if l.literal != tok.Text {
+ t.Errorf("got: %s want %s\n", tok, l.literal)
+ }
+ }
+
+}
+
+func TestError(t *testing.T) {
+ testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+ testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL)
+
+ testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING)
+ testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING)
+
+ testError(t, `01238`, "1:7", "numbers cannot start with 0", token.NUMBER)
+ testError(t, `01238123`, "1:10", "numbers cannot start with 0", token.NUMBER)
+ testError(t, `'aa'`, "1:1", "illegal char: '", token.ILLEGAL)
+
+ testError(t, `"`, "1:2", "literal not terminated", token.STRING)
+ testError(t, `"abc`, "1:5", "literal not terminated", token.STRING)
+ testError(t, `"abc`+"\n", "1:5", "literal not terminated", token.STRING)
+}
+
+func testError(t *testing.T, src, pos, msg string, tok token.Type) {
+ s := New([]byte(src))
+
+ errorCalled := false
+ s.Error = func(p token.Pos, m string) {
+ if !errorCalled {
+ if pos != p.String() {
+ t.Errorf("pos = %q, want %q for %q", p, pos, src)
+ }
+
+ if m != msg {
+ t.Errorf("msg = %q, want %q for %q", m, msg, src)
+ }
+ errorCalled = true
+ }
+ }
+
+ tk := s.Scan()
+ if tk.Type != tok {
+ t.Errorf("tok = %s, want %s for %q", tk, tok, src)
+ }
+ if !errorCalled {
+ t.Errorf("error handler not called for %q", src)
+ }
+ if s.ErrorCount == 0 {
+ t.Errorf("count = %d, want > 0 for %q", s.ErrorCount, src)
+ }
+}
+
+func testTokenList(t *testing.T, tokenList []tokenPair) {
+	// create artificial source code
+ buf := new(bytes.Buffer)
+ for _, ident := range tokenList {
+ fmt.Fprintf(buf, "%s\n", ident.text)
+ }
+
+ s := New(buf.Bytes())
+ for _, ident := range tokenList {
+ tok := s.Scan()
+ if tok.Type != ident.tok {
+ t.Errorf("tok = %q want %q for %q\n", tok, ident.tok, ident.text)
+ }
+
+ if tok.Text != ident.text {
+ t.Errorf("text = %q want %q", tok.String(), ident.text)
+ }
+
+ }
+}
+
+func countNewlines(s string) int {
+ n := 0
+ for _, ch := range s {
+ if ch == '\n' {
+ n++
+ }
+ }
+ return n
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
new file mode 100644
index 0000000000..e320f17ab2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/array.json
@@ -0,0 +1,4 @@
+{
+ "foo": [1, 2, "bar"],
+ "bar": "baz"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
new file mode 100644
index 0000000000..b54bde96c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/basic.json
@@ -0,0 +1,3 @@
+{
+ "foo": "bar"
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
new file mode 100644
index 0000000000..72168a3ccb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/object.json
@@ -0,0 +1,5 @@
+{
+ "foo": {
+ "bar": [1,2]
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
new file mode 100644
index 0000000000..9a142a6ca6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/test-fixtures/types.json
@@ -0,0 +1,10 @@
+{
+ "foo": "bar",
+ "bar": 7,
+ "baz": [1,2,3],
+ "foo": -12,
+ "bar": 3.14159,
+ "foo": true,
+ "bar": false,
+ "foo": null
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000000..59c1bb72d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
type Pos struct {
	Filename string // filename, if any
	Offset   int    // offset, starting at 0
	Line     int    // line number, starting at 1
	Column   int    // column number, starting at 1 (character count)
}

// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }

// String returns a string in one of several forms:
//
//	file:line:column    valid position with file name
//	line:column         valid position without file name
//	file                invalid position with file name
//	-                   invalid position without file name
func (p Pos) String() string {
	if !p.IsValid() {
		if p.Filename == "" {
			return "-"
		}
		return p.Filename
	}
	if p.Filename == "" {
		return fmt.Sprintf("%d:%d", p.Line, p.Column)
	}
	return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
}

// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
	return p.Offset < u.Offset || p.Line < u.Line
}

// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
	return p.Offset > u.Offset || p.Line > u.Line
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 0000000000..95a0c3eee6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
	Type Type   // lexical kind of the token (NUMBER, STRING, ...)
	Pos  Pos    // source position reported by the scanner
	Text string // raw text of the token as it appears in the input
}
+
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int

const (
	// Special tokens
	ILLEGAL Type = iota
	EOF

	// identifier_beg/identifier_end and literal_beg/literal_end are
	// unexported range markers used by IsIdentifier and IsLiteral;
	// they are not real token types. Do not reorder this block:
	// the predicates depend on the iota ordering.
	identifier_beg
	literal_beg
	NUMBER // 12345
	FLOAT  // 123.45
	BOOL   // true,false
	STRING // "abc"
	NULL   // null
	literal_end
	identifier_end

	// operator_beg/operator_end delimit the tokens reported by IsOperator.
	operator_beg
	LBRACK // [
	LBRACE // {
	COMMA  // ,
	PERIOD // .
	COLON  // :

	RBRACK // ]
	RBRACE // }

	operator_end
)
+
// tokens maps each Type to its human-readable name; used by Type.String.
// The unexported range markers deliberately have no entry.
var tokens = [...]string{
	ILLEGAL: "ILLEGAL",

	EOF: "EOF",

	NUMBER: "NUMBER",
	FLOAT:  "FLOAT",
	BOOL:   "BOOL",
	STRING: "STRING",
	NULL:   "NULL",

	LBRACK: "LBRACK",
	LBRACE: "LBRACE",
	COMMA:  "COMMA",
	PERIOD: "PERIOD",
	COLON:  "COLON",

	RBRACK: "RBRACK",
	RBRACE: "RBRACE",
}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
// String returns a human-readable representation of the token: its
// position, type name, and raw text, separated by spaces.
func (t Token) String() string {
	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
}

// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
	switch t.Type {
	case BOOL:
		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
	case FLOAT:
		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
	case NULL:
		// JSON null has no HCL equivalent; it maps to an empty HCL string.
		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
	case NUMBER:
		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
	case STRING:
		// NOTE(review): the JSON flag presumably tells the HCL consumer to
		// apply JSON string-escaping semantics — confirm against hcl/token.
		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
	default:
		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
	}
}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token_test.go b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
new file mode 100644
index 0000000000..a83fdd55bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token_test.go
@@ -0,0 +1,34 @@
+package token
+
+import (
+ "testing"
+)
+
+func TestTypeString(t *testing.T) {
+ var tokens = []struct {
+ tt Type
+ str string
+ }{
+ {ILLEGAL, "ILLEGAL"},
+ {EOF, "EOF"},
+ {NUMBER, "NUMBER"},
+ {FLOAT, "FLOAT"},
+ {BOOL, "BOOL"},
+ {STRING, "STRING"},
+ {NULL, "NULL"},
+ {LBRACK, "LBRACK"},
+ {LBRACE, "LBRACE"},
+ {COMMA, "COMMA"},
+ {PERIOD, "PERIOD"},
+ {RBRACK, "RBRACK"},
+ {RBRACE, "RBRACE"},
+ }
+
+ for _, token := range tokens {
+ if token.tt.String() != token.str {
+ t.Errorf("want: %q got:%q\n", token.str, token.tt)
+
+ }
+ }
+
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 0000000000..d9993c2928
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
// lexModeValue identifies which syntax a configuration input uses.
type lexModeValue byte

const (
	lexModeUnknown lexModeValue = iota
	lexModeHcl
	lexModeJson
)

// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode: input whose first non-whitespace rune is '{'
// is JSON; everything else (including empty input) is HCL.
func lexMode(v []byte) lexModeValue {
	for offset := 0; offset < len(v); {
		r, w := utf8.DecodeRune(v[offset:])
		offset += w
		switch {
		case unicode.IsSpace(r):
			continue
		case r == '{':
			return lexModeJson
		}
		// First non-space rune is not '{' (or is invalid UTF-8): HCL.
		break
	}
	return lexModeHcl
}
diff --git a/vendor/github.com/hashicorp/hcl/lex_test.go b/vendor/github.com/hashicorp/hcl/lex_test.go
new file mode 100644
index 0000000000..8062764446
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex_test.go
@@ -0,0 +1,37 @@
+package hcl
+
+import (
+ "testing"
+)
+
+func TestLexMode(t *testing.T) {
+ cases := []struct {
+ Input string
+ Mode lexModeValue
+ }{
+ {
+ "",
+ lexModeHcl,
+ },
+ {
+ "foo",
+ lexModeHcl,
+ },
+ {
+ "{}",
+ lexModeJson,
+ },
+ {
+ " {}",
+ lexModeJson,
+ },
+ }
+
+ for i, tc := range cases {
+ actual := lexMode([]byte(tc.Input))
+
+ if actual != tc.Mode {
+ t.Fatalf("%d: %#v", i, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 0000000000..1fca53c4ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
// ParseBytes accepts a byte slice as input and returns the AST tree.
//
// Input can be either JSON or HCL
func ParseBytes(in []byte) (*ast.File, error) {
	return parse(in)
}

// ParseString accepts input as a string and returns the AST tree.
func ParseString(input string) (*ast.File, error) {
	return parse([]byte(input))
}

// parse sniffs the input format with lexMode and dispatches to the
// matching parser.
func parse(in []byte) (*ast.File, error) {
	switch lexMode(in) {
	case lexModeHcl:
		return hclParser.Parse(in)
	case lexModeJson:
		return jsonParser.Parse(in)
	}

	return nil, fmt.Errorf("unknown config format")
}

// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
	return parse([]byte(input))
}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
new file mode 100644
index 0000000000..9499944872
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+bar = "${file("bing/bong.txt")}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
new file mode 100644
index 0000000000..7bdddc84b0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic.json
@@ -0,0 +1,4 @@
+{
+ "foo": "bar",
+ "bar": "${file(\"bing/bong.txt\")}"
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
new file mode 100644
index 0000000000..4e415da20e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_int_string.hcl
@@ -0,0 +1 @@
+count = "3"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
new file mode 100644
index 0000000000..363697b496
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/basic_squish.hcl
@@ -0,0 +1,3 @@
+foo="bar"
+bar="${file("bing/bong.txt")}"
+foo-bar="baz"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
new file mode 100644
index 0000000000..5b185cc918
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.hcl
@@ -0,0 +1,15 @@
+key "" {
+ policy = "read"
+}
+
+key "foo/" {
+ policy = "write"
+}
+
+key "foo/bar/" {
+ policy = "read"
+}
+
+key "foo/bar/baz" {
+ policy = "deny"
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
new file mode 100644
index 0000000000..151864ee89
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_policy.json
@@ -0,0 +1,19 @@
+{
+ "key": {
+ "": {
+ "policy": "read"
+ },
+
+ "foo/": {
+ "policy": "write"
+ },
+
+ "foo/bar/": {
+ "policy": "read"
+ },
+
+ "foo/bar/baz": {
+ "policy": "deny"
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
new file mode 100644
index 0000000000..52dcaa1bc3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.hcl
@@ -0,0 +1,10 @@
+variable "foo" {
+ default = "bar"
+ description = "bar"
+}
+
+variable "amis" {
+ default = {
+ east = "foo"
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
new file mode 100644
index 0000000000..49f921ed0b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/decode_tf_variable.json
@@ -0,0 +1,14 @@
+{
+ "variable": {
+ "foo": {
+ "default": "bar",
+ "description": "bar"
+ },
+
+ "amis": {
+ "default": {
+ "east": "foo"
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
new file mode 100644
index 0000000000..5be1b23154
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/empty.hcl
@@ -0,0 +1 @@
+resource "foo" {}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
new file mode 100644
index 0000000000..ead1b8b99e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/escape.hcl
@@ -0,0 +1 @@
+foo = "bar\"baz\\n"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
new file mode 100644
index 0000000000..9bca551f89
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/flat.hcl
@@ -0,0 +1,2 @@
+foo = "bar"
+Key = 7
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
new file mode 100644
index 0000000000..eed44e5420
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.hcl
@@ -0,0 +1 @@
+a = 1.02
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/float.json b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
new file mode 100644
index 0000000000..a9d1ab4b02
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/float.json
@@ -0,0 +1,3 @@
+{
+ "a": 1.02
+}
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl
new file mode 100644
index 0000000000..7b95391387
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/interpolate_escape.hcl
@@ -0,0 +1 @@
+foo="${file(\"bing/bong.txt\")}"
diff --git a/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
new file mode 100644
index 0000000000..f883bd707b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/test-fixtures/multiline.hcl
@@ -0,0 +1,4 @@
+foo = < []}`}},
+}
+
// TestDiff runs Diff over each fixture pair in diffs and compares the
// resulting description lines, order-sensitively, against the expectation.
func TestDiff(t *testing.T) {
	for _, tt := range diffs {
		got := Diff(tt.a, tt.b)
		// Equal only if same length and same lines in the same order.
		eq := len(got) == len(tt.exp)
		if eq {
			for i := range got {
				eq = eq && got[i] == tt.exp[i]
			}
		}
		if !eq {
			t.Errorf("diffing % #v", tt.a)
			t.Errorf("with % #v", tt.b)
			diffdiff(t, got, tt.exp)
			continue
		}
	}
}
+
// diffdiff reports how got and exp differ: lines present in got but not
// in exp ("unexpected"), and lines present in exp but not in got ("missing").
func diffdiff(t *testing.T, got, exp []string) {
	minus(t, "unexpected:", got, exp)
	minus(t, "missing:", exp, got)
}
+
// minus reports, prefixed with s, every element of a that does not occur
// anywhere in b (the set difference a - b), using a quadratic scan.
func minus(t *testing.T, s string, a, b []string) {
	var i, j int
	for i = 0; i < len(a); i++ {
		for j = 0; j < len(b); j++ {
			if a[i] == b[j] {
				break
			}
		}
		// j reaches len(b) only when the inner loop found no match.
		if j == len(b) {
			t.Error(s, a[i])
		}
	}
}
diff --git a/vendor/github.com/kr/pretty/example_test.go b/vendor/github.com/kr/pretty/example_test.go
new file mode 100644
index 0000000000..ecf40f3fcc
--- /dev/null
+++ b/vendor/github.com/kr/pretty/example_test.go
@@ -0,0 +1,20 @@
+package pretty_test
+
+import (
+ "fmt"
+ "github.com/kr/pretty"
+)
+
// Example demonstrates pretty-printing a slice of structs with the
// "%# v" verb. The output comment below is verified by "go test".
func Example() {
	type myType struct {
		a, b int
	}
	var x = []myType{{1, 2}, {3, 4}, {5, 6}}
	fmt.Printf("%# v", pretty.Formatter(x))
	// output:
	// []pretty_test.myType{
	//  {a:1, b:2},
	//  {a:3, b:4},
	//  {a:5, b:6},
	// }
}
diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go
new file mode 100644
index 0000000000..8dacda25fa
--- /dev/null
+++ b/vendor/github.com/kr/pretty/formatter.go
@@ -0,0 +1,337 @@
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "text/tabwriter"
+
+ "github.com/kr/text"
+)
+
const (
	// limit is a size threshold constant.
	// NOTE(review): not referenced in this file's visible code — presumably
	// used elsewhere in the package; confirm before removing.
	limit = 50
)

// formatter wraps a value so it can be pretty-printed through the
// fmt.Formatter interface.
type formatter struct {
	x     interface{} // the wrapped value
	force bool        // when set, Format pretty-prints regardless of verb/flags
	quote bool        // when set, strings are printed Go-quoted
}

// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
//     fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
	return formatter{x: x, quote: true}
}

// String formats the wrapped value with package fmt's default rules.
func (fo formatter) String() string {
	return fmt.Sprint(fo.x) // unwrap it
}
+
// passThrough reconstructs the original format directive (flags, width,
// precision, verb) and re-runs it on the wrapped value with package fmt's
// normal rules.
func (fo formatter) passThrough(f fmt.State, c rune) {
	s := "%"
	// fmt.State exposes flags by character code; probe the ASCII range.
	for i := 0; i < 128; i++ {
		if f.Flag(i) {
			s += string(i) // convert the flag's character code back to text
		}
	}
	if w, ok := f.Width(); ok {
		s += fmt.Sprintf("%d", w)
	}
	if p, ok := f.Precision(); ok {
		s += fmt.Sprintf(".%d", p)
	}
	s += string(c) // the verb itself
	fmt.Fprintf(f, s, fo.x)
}
+
// Format implements fmt.Formatter. The pretty path is taken for the
// "%# v" directive (or unconditionally when force is set); every other
// directive is delegated to passThrough.
func (fo formatter) Format(f fmt.State, c rune) {
	if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
		// A tabwriter aligns struct fields and map values into columns.
		w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
		p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
		p.printValue(reflect.ValueOf(fo.x), true, fo.quote)
		w.Flush()
		return
	}
	fo.passThrough(f, c)
}
+
// printer carries the output destination and the state shared across the
// recursive printValue walk.
type printer struct {
	io.Writer
	tw      *tabwriter.Writer // column-aligning writer for the current level
	visited map[visit]int     // addresses already printed, for cycle detection
	depth   int               // pointer/interface indirection depth
}

// indent returns a copy of p whose output is indented one tab deeper,
// with a fresh tabwriter for the nested level.
func (p *printer) indent() *printer {
	q := *p
	q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
	q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
	return &q
}
+
// printInline writes x with %#v, prefixed by v's type name when showType
// is set (e.g. "int32(1)" rather than "1").
func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
	if showType {
		io.WriteString(p, v.Type().String())
		fmt.Fprintf(p, "(%#v)", x)
	} else {
		fmt.Fprintf(p, "%#v", x)
	}
}

// printValue must keep track of already-printed pointer values to avoid
// infinite recursion.
type visit struct {
	v   uintptr      // address of the visited value
	typ reflect.Type // type at that address (same address may host nested types)
}
+
// printValue writes a pretty, possibly multi-line rendering of v.
// showType controls whether values are prefixed with their type name;
// quote controls whether strings are Go-quoted. Recursion depth is capped
// at 10 levels of indirection, and addressable structs already seen at a
// shallower depth are elided as cyclic references.
func (p *printer) printValue(v reflect.Value, showType, quote bool) {
	if p.depth > 10 {
		io.WriteString(p, "!%v(DEPTH EXCEEDED)")
		return
	}

	switch v.Kind() {
	case reflect.Bool:
		p.printInline(v, v.Bool(), showType)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p.printInline(v, v.Int(), showType)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p.printInline(v, v.Uint(), showType)
	case reflect.Float32, reflect.Float64:
		p.printInline(v, v.Float(), showType)
	case reflect.Complex64, reflect.Complex128:
		fmt.Fprintf(p, "%#v", v.Complex())
	case reflect.String:
		p.fmtString(v.String(), quote)
	case reflect.Map:
		t := v.Type()
		if showType {
			io.WriteString(p, t.String())
		}
		writeByte(p, '{')
		if nonzero(v) {
			// Expand to multiple lines only when an element could itself expand.
			expand := !canInline(v.Type())
			pp := p
			if expand {
				writeByte(p, '\n')
				pp = p.indent()
			}
			keys := v.MapKeys()
			for i := 0; i < v.Len(); i++ {
				showTypeInStruct := true
				k := keys[i]
				mv := v.MapIndex(k)
				pp.printValue(k, false, true)
				writeByte(pp, ':')
				if expand {
					// Tab cell so the tabwriter aligns values in a column.
					writeByte(pp, '\t')
				}
				// Label the value's type only when the map's element type
				// is an interface (otherwise the type is implied).
				showTypeInStruct = t.Elem().Kind() == reflect.Interface
				pp.printValue(mv, showTypeInStruct, true)
				if expand {
					io.WriteString(pp, ",\n")
				} else if i < v.Len()-1 {
					io.WriteString(pp, ", ")
				}
			}
			if expand {
				pp.tw.Flush()
			}
		}
		writeByte(p, '}')
	case reflect.Struct:
		t := v.Type()
		if v.CanAddr() {
			// Cycle detection: a struct revisited at a deeper level than
			// where it was first printed is a cyclic reference.
			addr := v.UnsafeAddr()
			vis := visit{addr, t}
			if vd, ok := p.visited[vis]; ok && vd < p.depth {
				p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
				break // don't print v again
			}
			p.visited[vis] = p.depth
		}

		if showType {
			io.WriteString(p, t.String())
		}
		writeByte(p, '{')
		if nonzero(v) {
			expand := !canInline(v.Type())
			pp := p
			if expand {
				writeByte(p, '\n')
				pp = p.indent()
			}
			for i := 0; i < v.NumField(); i++ {
				showTypeInStruct := true
				if f := t.Field(i); f.Name != "" {
					io.WriteString(pp, f.Name)
					writeByte(pp, ':')
					if expand {
						writeByte(pp, '\t')
					}
					showTypeInStruct = labelType(f.Type)
				}
				pp.printValue(getField(v, i), showTypeInStruct, true)
				if expand {
					io.WriteString(pp, ",\n")
				} else if i < v.NumField()-1 {
					io.WriteString(pp, ", ")
				}
			}
			if expand {
				pp.tw.Flush()
			}
		}
		writeByte(p, '}')
	case reflect.Interface:
		switch e := v.Elem(); {
		case e.Kind() == reflect.Invalid:
			io.WriteString(p, "nil")
		case e.IsValid():
			// Unwrapping an interface counts as one level of indirection.
			pp := *p
			pp.depth++
			pp.printValue(e, showType, true)
		default:
			io.WriteString(p, v.Type().String())
			io.WriteString(p, "(nil)")
		}
	case reflect.Array, reflect.Slice:
		t := v.Type()
		if showType {
			io.WriteString(p, t.String())
		}
		// A nil slice renders as "(nil)" after its type, or bare "nil".
		if v.Kind() == reflect.Slice && v.IsNil() && showType {
			io.WriteString(p, "(nil)")
			break
		}
		if v.Kind() == reflect.Slice && v.IsNil() {
			io.WriteString(p, "nil")
			break
		}
		writeByte(p, '{')
		expand := !canInline(v.Type())
		pp := p
		if expand {
			writeByte(p, '\n')
			pp = p.indent()
		}
		for i := 0; i < v.Len(); i++ {
			showTypeInSlice := t.Elem().Kind() == reflect.Interface
			pp.printValue(v.Index(i), showTypeInSlice, true)
			if expand {
				io.WriteString(pp, ",\n")
			} else if i < v.Len()-1 {
				io.WriteString(pp, ", ")
			}
		}
		if expand {
			pp.tw.Flush()
		}
		writeByte(p, '}')
	case reflect.Ptr:
		e := v.Elem()
		if !e.IsValid() {
			writeByte(p, '(')
			io.WriteString(p, v.Type().String())
			io.WriteString(p, ")(nil)")
		} else {
			// Dereferencing counts as one level of indirection.
			pp := *p
			pp.depth++
			writeByte(pp, '&')
			pp.printValue(e, true, true)
		}
	case reflect.Chan:
		x := v.Pointer()
		if showType {
			writeByte(p, '(')
			io.WriteString(p, v.Type().String())
			fmt.Fprintf(p, ")(%#v)", x)
		} else {
			fmt.Fprintf(p, "%#v", x)
		}
	case reflect.Func:
		io.WriteString(p, v.Type().String())
		io.WriteString(p, " {...}")
	case reflect.UnsafePointer:
		p.printInline(v, v.Pointer(), showType)
	case reflect.Invalid:
		io.WriteString(p, "nil")
	}
}
+
+func canInline(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Map:
+ return !canExpand(t.Elem())
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if canExpand(t.Field(i).Type) {
+ return false
+ }
+ }
+ return true
+ case reflect.Interface:
+ return false
+ case reflect.Array, reflect.Slice:
+ return !canExpand(t.Elem())
+ case reflect.Ptr:
+ return false
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ return false
+ }
+ return true
+}
+
+func canExpand(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Map, reflect.Struct,
+ reflect.Interface, reflect.Array, reflect.Slice,
+ reflect.Ptr:
+ return true
+ }
+ return false
+}
+
+func labelType(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Interface, reflect.Struct:
+ return true
+ }
+ return false
+}
+
// fmtString writes s, Go-quoting it first when quote is set.
func (p *printer) fmtString(s string, quote bool) {
	if quote {
		s = strconv.Quote(s)
	}
	io.WriteString(p, s)
}
+
// tryDeepEqual is reflect.DeepEqual hardened against panics: if the
// comparison panics, the deferred recover makes the function return the
// zero value, false.
func tryDeepEqual(a, b interface{}) bool {
	defer func() { recover() }()
	return reflect.DeepEqual(a, b)
}
+
+func writeByte(w io.Writer, b byte) {
+ w.Write([]byte{b})
+}
+
+func getField(v reflect.Value, i int) reflect.Value {
+ val := v.Field(i)
+ if val.Kind() == reflect.Interface && !val.IsNil() {
+ val = val.Elem()
+ }
+ return val
+}
diff --git a/vendor/github.com/kr/pretty/formatter_test.go b/vendor/github.com/kr/pretty/formatter_test.go
new file mode 100644
index 0000000000..5f3204e8e8
--- /dev/null
+++ b/vendor/github.com/kr/pretty/formatter_test.go
@@ -0,0 +1,261 @@
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "testing"
+ "unsafe"
+)
+
// test pairs a value with its expected "%# v" rendering.
type test struct {
	v interface{}
	s string
}

// LongStructTypeName has long field names to force multi-line output.
type LongStructTypeName struct {
	longFieldName      interface{}
	otherLongFieldName interface{}
}

// SA exercises both pointer and value struct fields.
type SA struct {
	t *T
	v T
}

type T struct {
	x, y int
}

// F implements fmt.Formatter to verify custom formatting is respected.
type F int

func (f F) Format(s fmt.State, c rune) {
	fmt.Fprintf(s, "F(%d)", int(f))
}

// long is a 62-character string used to force line expansion.
var long = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+
// gosyntax lists values and the exact output expected from the "%# v"
// verb. The indentation inside the raw strings mirrors tabwriter output
// (4-space indent, padded columns) and is significant.
var gosyntax = []test{
	{nil, `nil`},
	{"", `""`},
	{"a", `"a"`},
	{1, "int(1)"},
	{1.0, "float64(1)"},
	{[]int(nil), "[]int(nil)"},
	{[0]int{}, "[0]int{}"},
	{complex(1, 0), "(1+0i)"},
	//{make(chan int), "(chan int)(0x1234)"},
	{unsafe.Pointer(uintptr(unsafe.Pointer(&long))), fmt.Sprintf("unsafe.Pointer(0x%02x)", uintptr(unsafe.Pointer(&long)))},
	{func(int) {}, "func(int) {...}"},
	{map[int]int{1: 1}, "map[int]int{1:1}"},
	{int32(1), "int32(1)"},
	{io.EOF, `&errors.errorString{s:"EOF"}`},
	{[]string{"a"}, `[]string{"a"}`},
	{
		[]string{long},
		`[]string{"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"}`,
	},
	{F(5), "pretty.F(5)"},
	{
		SA{&T{1, 2}, T{3, 4}},
		`pretty.SA{
    t:  &pretty.T{x:1, y:2},
    v:  pretty.T{x:3, y:4},
}`,
	},
	{
		map[int][]byte{1: {}},
		`map[int][]uint8{
    1:  {},
}`,
	},
	{
		map[int]T{1: {}},
		`map[int]pretty.T{
    1:  {},
}`,
	},
	{
		long,
		`"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"`,
	},
	{
		LongStructTypeName{
			longFieldName:      LongStructTypeName{},
			otherLongFieldName: long,
		},
		`pretty.LongStructTypeName{
    longFieldName:      pretty.LongStructTypeName{},
    otherLongFieldName: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
}`,
	},
	{
		&LongStructTypeName{
			longFieldName:      &LongStructTypeName{},
			otherLongFieldName: (*LongStructTypeName)(nil),
		},
		`&pretty.LongStructTypeName{
    longFieldName:      &pretty.LongStructTypeName{},
    otherLongFieldName: (*pretty.LongStructTypeName)(nil),
}`,
	},
	{
		[]LongStructTypeName{
			{nil, nil},
			{3, 3},
			{long, nil},
		},
		`[]pretty.LongStructTypeName{
    {},
    {
        longFieldName:      int(3),
        otherLongFieldName: int(3),
    },
    {
        longFieldName:      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
        otherLongFieldName: nil,
    },
}`,
	},
	{
		[]interface{}{
			LongStructTypeName{nil, nil},
			[]byte{1, 2, 3},
			T{3, 4},
			LongStructTypeName{long, nil},
		},
		`[]interface {}{
    pretty.LongStructTypeName{},
    []uint8{0x1, 0x2, 0x3},
    pretty.T{x:3, y:4},
    pretty.LongStructTypeName{
        longFieldName:      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789",
        otherLongFieldName: nil,
    },
}`,
	},
}
+
+func TestGoSyntax(t *testing.T) {
+ for _, tt := range gosyntax {
+ s := fmt.Sprintf("%# v", Formatter(tt.v))
+ if tt.s != s {
+ t.Errorf("expected %q", tt.s)
+ t.Errorf("got %q", s)
+ t.Errorf("expraw\n%s", tt.s)
+ t.Errorf("gotraw\n%s", s)
+ }
+ }
+}
+
// I is a linked node used to build interface-typed cycles in TestCycle.
type I struct {
	i int
	R interface{} // the next *I in the chain, held as an interface
}

// I returns the next node, asserting R back to *I.
func (i *I) I() *I { return i.R.(*I) }
+
// TestCycle verifies that cyclic data structures terminate (via cycle
// detection) and that mere repetition of the same address in a slice is
// NOT reported as a cycle. The t.Logf calls would overflow the stack if
// cycle detection were broken.
func TestCycle(t *testing.T) {
	type A struct{ *A }
	v := &A{}
	v.A = v

	// panics from stack overflow without cycle detection
	t.Logf("Example cycle:\n%# v", Formatter(v))

	p := &A{}
	s := fmt.Sprintf("%# v", Formatter([]*A{p, p}))
	if strings.Contains(s, "CYCLIC") {
		t.Errorf("Repeated address detected as cyclic reference:\n%s", s)
	}

	type R struct {
		i int
		*R
	}
	r := &R{
		i: 1,
		R: &R{
			i: 2,
			R: &R{
				i: 3,
			},
		},
	}
	// Close a three-node pointer cycle.
	r.R.R.R = r
	t.Logf("Example longer cycle:\n%# v", Formatter(r))

	r = &R{
		i: 1,
		R: &R{
			i: 2,
			R: &R{
				i: 3,
				R: &R{
					i: 4,
					R: &R{
						i: 5,
						R: &R{
							i: 6,
							R: &R{
								i: 7,
								R: &R{
									i: 8,
									R: &R{
										i: 9,
										R: &R{
											i: 10,
											R: &R{
												i: 11,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	// here be pirates
	r.R.R.R.R.R.R.R.R.R.R.R = r
	t.Logf("Example very long cycle:\n%# v", Formatter(r))

	i := &I{
		i: 1,
		R: &I{
			i: 2,
			R: &I{
				i: 3,
				R: &I{
					i: 4,
					R: &I{
						i: 5,
						R: &I{
							i: 6,
							R: &I{
								i: 7,
								R: &I{
									i: 8,
									R: &I{
										i: 9,
										R: &I{
											i: 10,
											R: &I{
												i: 11,
											},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}
	// Close the interface-typed cycle at the deepest node.
	iv := i.I().I().I().I().I().I().I().I().I().I()
	*iv = *i
	t.Logf("Example long interface cycle:\n%# v", Formatter(i))
}
diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go
new file mode 100644
index 0000000000..b91020a284
--- /dev/null
+++ b/vendor/github.com/kr/pretty/pretty.go
@@ -0,0 +1,107 @@
+// Package pretty provides pretty-printing for Go values. This is
+// useful during debugging, to avoid wrapping long output lines in
+// the terminal.
+//
+// It provides a function, Formatter, that can be used with any
+// function that accepts a format string. It also provides
+// convenience wrappers for functions in packages fmt and log.
+package pretty
+
+import (
+ "fmt"
+ "io"
+ "log"
+)
+
// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
	return fmt.Errorf(format, wrap(a, false)...)
}

// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
	return fmt.Fprintf(w, format, wrap(a, false)...)
}

// Log is a convenience wrapper for log.Print.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
	log.Print(wrap(a, true)...)
}

// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
	log.Printf(format, wrap(a, false)...)
}

// Logln is a convenience wrapper for log.Println.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
	log.Println(wrap(a, true)...)
}

// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
	return fmt.Print(wrap(a, true)...)
}

// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
	return fmt.Printf(format, wrap(a, false)...)
}

// Println pretty-prints its operands and writes to standard output.
//
// Calling Println(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
	return fmt.Println(wrap(a, true)...)
}

// Sprint is a convenience wrapper for fmt.Sprint.
//
// Calling Sprint(x, y) is equivalent to
// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Sprint(a ...interface{}) string {
	return fmt.Sprint(wrap(a, true)...)
}

// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, wrap(a, false)...)
}

// wrap boxes each operand in a formatter so the pretty printer handles
// it; force controls whether pretty output is used regardless of the verb.
func wrap(a []interface{}, force bool) []interface{} {
	w := make([]interface{}, len(a))
	for i, x := range a {
		w[i] = formatter{x: x, force: force}
	}
	return w
}
diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go
new file mode 100644
index 0000000000..abb5b6fc14
--- /dev/null
+++ b/vendor/github.com/kr/pretty/zero.go
@@ -0,0 +1,41 @@
+package pretty
+
+import (
+ "reflect"
+)
+
// nonzero reports whether v holds anything other than its type's zero
// value. Aggregates (structs, arrays) are nonzero if any element is;
// reference kinds are nonzero when non-nil.
func nonzero(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() != 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() != 0
	case reflect.Float32, reflect.Float64:
		return v.Float() != 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() != complex(0, 0)
	case reflect.String:
		return v.String() != ""
	case reflect.Struct:
		// A struct is nonzero if any of its fields is.
		for i := 0; i < v.NumField(); i++ {
			if nonzero(getField(v, i)) {
				return true
			}
		}
		return false
	case reflect.Array:
		// An array is nonzero if any of its elements is.
		for i := 0; i < v.Len(); i++ {
			if nonzero(v.Index(i)) {
				return true
			}
		}
		return false
	case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
		return !v.IsNil()
	case reflect.UnsafePointer:
		return v.Pointer() != 0
	}
	// Unhandled kinds are conservatively treated as nonzero.
	return true
}
diff --git a/vendor/github.com/kr/pty/.gitignore b/vendor/github.com/kr/pty/.gitignore
new file mode 100644
index 0000000000..1f0a99f2f2
--- /dev/null
+++ b/vendor/github.com/kr/pty/.gitignore
@@ -0,0 +1,4 @@
+[568].out
+_go*
+_test*
+_obj
diff --git a/vendor/github.com/kr/pty/License b/vendor/github.com/kr/pty/License
new file mode 100644
index 0000000000..6b7558b6b4
--- /dev/null
+++ b/vendor/github.com/kr/pty/License
@@ -0,0 +1,23 @@
+Copyright (c) 2011 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall
+be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
+OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/kr/pty/README.md b/vendor/github.com/kr/pty/README.md
new file mode 100644
index 0000000000..7b7900c3ae
--- /dev/null
+++ b/vendor/github.com/kr/pty/README.md
@@ -0,0 +1,36 @@
+# pty
+
+Pty is a Go package for using unix pseudo-terminals.
+
+## Install
+
+ go get github.com/kr/pty
+
+## Example
+
+```go
+package main
+
+import (
+ "github.com/kr/pty"
+ "io"
+ "os"
+ "os/exec"
+)
+
+func main() {
+ c := exec.Command("grep", "--color=auto", "bar")
+ f, err := pty.Start(c)
+ if err != nil {
+ panic(err)
+ }
+
+ go func() {
+ f.Write([]byte("foo\n"))
+ f.Write([]byte("bar\n"))
+ f.Write([]byte("baz\n"))
+ f.Write([]byte{4}) // EOT
+ }()
+ io.Copy(os.Stdout, f)
+}
+```
diff --git a/vendor/github.com/kr/pty/doc.go b/vendor/github.com/kr/pty/doc.go
new file mode 100644
index 0000000000..190cfbea92
--- /dev/null
+++ b/vendor/github.com/kr/pty/doc.go
@@ -0,0 +1,16 @@
+// Package pty provides functions for working with Unix terminals.
+package pty
+
+import (
+ "errors"
+ "os"
+)
+
+// ErrUnsupported is returned if a function is not
+// available on the current platform.
+var ErrUnsupported = errors.New("unsupported")
+
+// Opens a pty and its corresponding tty.
+func Open() (pty, tty *os.File, err error) {
+ return open()
+}
diff --git a/vendor/github.com/kr/pty/ioctl.go b/vendor/github.com/kr/pty/ioctl.go
new file mode 100644
index 0000000000..5b856e8711
--- /dev/null
+++ b/vendor/github.com/kr/pty/ioctl.go
@@ -0,0 +1,11 @@
+package pty
+
+import "syscall"
+
+func ioctl(fd, cmd, ptr uintptr) error {
+ _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr)
+ if e != 0 {
+ return e
+ }
+ return nil
+}
diff --git a/vendor/github.com/kr/pty/ioctl_bsd.go b/vendor/github.com/kr/pty/ioctl_bsd.go
new file mode 100644
index 0000000000..73b12c53cf
--- /dev/null
+++ b/vendor/github.com/kr/pty/ioctl_bsd.go
@@ -0,0 +1,39 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package pty
+
+// from
+// from <sys/ioccom.h>
+const (
+ _IOC_VOID uintptr = 0x20000000
+ _IOC_OUT uintptr = 0x40000000
+ _IOC_IN uintptr = 0x80000000
+ _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN
+ _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN
+
+ _IOC_PARAM_SHIFT = 13
+ _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1
+)
+
+func _IOC_PARM_LEN(ioctl uintptr) uintptr {
+ return (ioctl >> 16) & _IOC_PARAM_MASK
+}
+
+func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num
+}
+
+func _IO(group byte, ioctl_num uintptr) uintptr {
+ return _IOC(_IOC_VOID, group, ioctl_num, 0)
+}
+
+func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_OUT, group, ioctl_num, param_len)
+}
+
+func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_IN, group, ioctl_num, param_len)
+}
+
+func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr {
+ return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len)
+}
diff --git a/vendor/github.com/kr/pty/mktypes.bash b/vendor/github.com/kr/pty/mktypes.bash
new file mode 100755
index 0000000000..9952c88838
--- /dev/null
+++ b/vendor/github.com/kr/pty/mktypes.bash
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+
+GOOSARCH="${GOOS}_${GOARCH}"
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+GODEFS="go tool cgo -godefs"
+
+$GODEFS types.go |gofmt > ztypes_$GOARCH.go
+
+case $GOOS in
+freebsd)
+ $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go
+ ;;
+esac
diff --git a/vendor/github.com/kr/pty/pty_darwin.go b/vendor/github.com/kr/pty/pty_darwin.go
new file mode 100644
index 0000000000..4f4d5ca26e
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_darwin.go
@@ -0,0 +1,60 @@
+package pty
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func open() (pty, tty *os.File, err error) {
+ p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = grantpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = unlockpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile(sname, os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func ptsname(f *os.File) (string, error) {
+ n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME))
+
+ err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0])))
+ if err != nil {
+ return "", err
+ }
+
+ for i, c := range n {
+ if c == 0 {
+ return string(n[:i]), nil
+ }
+ }
+ return "", errors.New("TIOCPTYGNAME string not NUL-terminated")
+}
+
+func grantpt(f *os.File) error {
+ return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0)
+}
+
+func unlockpt(f *os.File) error {
+ return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0)
+}
diff --git a/vendor/github.com/kr/pty/pty_freebsd.go b/vendor/github.com/kr/pty/pty_freebsd.go
new file mode 100644
index 0000000000..b341babd05
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_freebsd.go
@@ -0,0 +1,73 @@
+package pty
+
+import (
+ "errors"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func posix_openpt(oflag int) (fd int, err error) {
+ r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
+
+func open() (pty, tty *os.File, err error) {
+ fd, err := posix_openpt(syscall.O_RDWR | syscall.O_CLOEXEC)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ p := os.NewFile(uintptr(fd), "/dev/pts")
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func isptmaster(fd uintptr) (bool, error) {
+ err := ioctl(fd, syscall.TIOCPTMASTER, 0)
+ return err == nil, err
+}
+
+var (
+ emptyFiodgnameArg fiodgnameArg
+ ioctl_FIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg))
+)
+
+func ptsname(f *os.File) (string, error) {
+ master, err := isptmaster(f.Fd())
+ if err != nil {
+ return "", err
+ }
+ if !master {
+ return "", syscall.EINVAL
+ }
+
+ const n = _C_SPECNAMELEN + 1
+ var (
+ buf = make([]byte, n)
+ arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))}
+ )
+ err = ioctl(f.Fd(), ioctl_FIODGNAME, uintptr(unsafe.Pointer(&arg)))
+ if err != nil {
+ return "", err
+ }
+
+ for i, c := range buf {
+ if c == 0 {
+ return string(buf[:i]), nil
+ }
+ }
+ return "", errors.New("FIODGNAME string not NUL-terminated")
+}
diff --git a/vendor/github.com/kr/pty/pty_linux.go b/vendor/github.com/kr/pty/pty_linux.go
new file mode 100644
index 0000000000..cb901a21e0
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_linux.go
@@ -0,0 +1,46 @@
+package pty
+
+import (
+ "os"
+ "strconv"
+ "syscall"
+ "unsafe"
+)
+
+func open() (pty, tty *os.File, err error) {
+ p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ sname, err := ptsname(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ err = unlockpt(p)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0)
+ if err != nil {
+ return nil, nil, err
+ }
+ return p, t, nil
+}
+
+func ptsname(f *os.File) (string, error) {
+ var n _C_uint
+ err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n)))
+ if err != nil {
+ return "", err
+ }
+ return "/dev/pts/" + strconv.Itoa(int(n)), nil
+}
+
+func unlockpt(f *os.File) error {
+ var u _C_int
+ // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock
+ return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
diff --git a/vendor/github.com/kr/pty/pty_unsupported.go b/vendor/github.com/kr/pty/pty_unsupported.go
new file mode 100644
index 0000000000..898c7303c4
--- /dev/null
+++ b/vendor/github.com/kr/pty/pty_unsupported.go
@@ -0,0 +1,11 @@
+// +build !linux,!darwin,!freebsd
+
+package pty
+
+import (
+ "os"
+)
+
+func open() (pty, tty *os.File, err error) {
+ return nil, nil, ErrUnsupported
+}
diff --git a/vendor/github.com/kr/pty/run.go b/vendor/github.com/kr/pty/run.go
new file mode 100644
index 0000000000..c2bc48878c
--- /dev/null
+++ b/vendor/github.com/kr/pty/run.go
@@ -0,0 +1,32 @@
+package pty
+
+import (
+ "os"
+ "os/exec"
+ "syscall"
+)
+
+// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout,
+// and c.Stderr, calls c.Start, and returns the File of the tty's
+// corresponding pty.
+func Start(c *exec.Cmd) (pty *os.File, err error) {
+ pty, tty, err := Open()
+ if err != nil {
+ return nil, err
+ }
+ defer tty.Close()
+ c.Stdout = tty
+ c.Stdin = tty
+ c.Stderr = tty
+ if c.SysProcAttr == nil {
+ c.SysProcAttr = &syscall.SysProcAttr{}
+ }
+ c.SysProcAttr.Setctty = true
+ c.SysProcAttr.Setsid = true
+ err = c.Start()
+ if err != nil {
+ pty.Close()
+ return nil, err
+ }
+ return pty, err
+}
diff --git a/vendor/github.com/kr/pty/types.go b/vendor/github.com/kr/pty/types.go
new file mode 100644
index 0000000000..5aecb6bcdc
--- /dev/null
+++ b/vendor/github.com/kr/pty/types.go
@@ -0,0 +1,10 @@
+// +build ignore
+
+package pty
+
+import "C"
+
+type (
+ _C_int C.int
+ _C_uint C.uint
+)
diff --git a/vendor/github.com/kr/pty/types_freebsd.go b/vendor/github.com/kr/pty/types_freebsd.go
new file mode 100644
index 0000000000..ce3eb95181
--- /dev/null
+++ b/vendor/github.com/kr/pty/types_freebsd.go
@@ -0,0 +1,15 @@
+// +build ignore
+
+package pty
+
+/*
+#include <sys/param.h>
+#include <sys/filio.h>
+*/
+import "C"
+
+const (
+ _C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */
+)
+
+type fiodgnameArg C.struct_fiodgname_arg
diff --git a/vendor/github.com/kr/pty/util.go b/vendor/github.com/kr/pty/util.go
new file mode 100644
index 0000000000..67c52d06cd
--- /dev/null
+++ b/vendor/github.com/kr/pty/util.go
@@ -0,0 +1,35 @@
+package pty
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// Getsize returns the number of rows (lines) and cols (positions
+// in each line) in terminal t.
+func Getsize(t *os.File) (rows, cols int, err error) {
+ var ws winsize
+ err = windowrect(&ws, t.Fd())
+ return int(ws.ws_row), int(ws.ws_col), err
+}
+
+type winsize struct {
+ ws_row uint16
+ ws_col uint16
+ ws_xpixel uint16
+ ws_ypixel uint16
+}
+
+func windowrect(ws *winsize, fd uintptr) error {
+ _, _, errno := syscall.Syscall(
+ syscall.SYS_IOCTL,
+ fd,
+ syscall.TIOCGWINSZ,
+ uintptr(unsafe.Pointer(ws)),
+ )
+ if errno != 0 {
+ return syscall.Errno(errno)
+ }
+ return nil
+}
diff --git a/vendor/github.com/kr/pty/ztypes_386.go b/vendor/github.com/kr/pty/ztypes_386.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_386.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_amd64.go b/vendor/github.com/kr/pty/ztypes_amd64.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_amd64.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_arm.go b/vendor/github.com/kr/pty/ztypes_arm.go
new file mode 100644
index 0000000000..ff0b8fd838
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_arm.go
@@ -0,0 +1,9 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_arm64.go b/vendor/github.com/kr/pty/ztypes_arm64.go
new file mode 100644
index 0000000000..6c29a4b918
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_arm64.go
@@ -0,0 +1,11 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+// +build arm64
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_386.go b/vendor/github.com/kr/pty/ztypes_freebsd_386.go
new file mode 100644
index 0000000000..d9975374e3
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_386.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go
new file mode 100644
index 0000000000..5fa102fcdf
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go
@@ -0,0 +1,14 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Pad_cgo_0 [4]byte
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go
new file mode 100644
index 0000000000..d9975374e3
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go
@@ -0,0 +1,13 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_freebsd.go
+
+package pty
+
+const (
+ _C_SPECNAMELEN = 0x3f
+)
+
+type fiodgnameArg struct {
+ Len int32
+ Buf *byte
+}
diff --git a/vendor/github.com/kr/pty/ztypes_ppc64.go b/vendor/github.com/kr/pty/ztypes_ppc64.go
new file mode 100644
index 0000000000..4e1af84312
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_ppc64.go
@@ -0,0 +1,11 @@
+// +build ppc64
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_ppc64le.go b/vendor/github.com/kr/pty/ztypes_ppc64le.go
new file mode 100644
index 0000000000..e6780f4e23
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_ppc64le.go
@@ -0,0 +1,11 @@
+// +build ppc64le
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/pty/ztypes_s390x.go b/vendor/github.com/kr/pty/ztypes_s390x.go
new file mode 100644
index 0000000000..a7452b61cb
--- /dev/null
+++ b/vendor/github.com/kr/pty/ztypes_s390x.go
@@ -0,0 +1,11 @@
+// +build s390x
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types.go
+
+package pty
+
+type (
+ _C_int int32
+ _C_uint uint32
+)
diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License
new file mode 100644
index 0000000000..480a328059
--- /dev/null
+++ b/vendor/github.com/kr/text/License
@@ -0,0 +1,19 @@
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme
new file mode 100644
index 0000000000..7e6e7c0687
--- /dev/null
+++ b/vendor/github.com/kr/text/Readme
@@ -0,0 +1,3 @@
+This is a Go package for manipulating paragraphs of text.
+
+See http://go.pkgdoc.org/github.com/kr/text for full documentation.
diff --git a/vendor/github.com/kr/text/colwriter/Readme b/vendor/github.com/kr/text/colwriter/Readme
new file mode 100644
index 0000000000..1c1f4e6839
--- /dev/null
+++ b/vendor/github.com/kr/text/colwriter/Readme
@@ -0,0 +1,5 @@
+Package colwriter provides a write filter that formats
+input lines in multiple columns.
+
+The package is a straightforward translation from
+/src/cmd/draw/mc.c in Plan 9 from User Space.
diff --git a/vendor/github.com/kr/text/colwriter/column.go b/vendor/github.com/kr/text/colwriter/column.go
new file mode 100644
index 0000000000..7302ce9f7a
--- /dev/null
+++ b/vendor/github.com/kr/text/colwriter/column.go
@@ -0,0 +1,147 @@
+// Package colwriter provides a write filter that formats
+// input lines in multiple columns.
+//
+// The package is a straightforward translation from
+// /src/cmd/draw/mc.c in Plan 9 from User Space.
+package colwriter
+
+import (
+ "bytes"
+ "io"
+ "unicode/utf8"
+)
+
+const (
+ tab = 4
+)
+
+const (
+ // Print each input line ending in a colon ':' separately.
+ BreakOnColon uint = 1 << iota
+)
+
+// A Writer is a filter that arranges input lines in as many columns as will
+// fit in its width. Tab '\t' chars in the input are translated to sequences
+// of spaces ending at multiples of 4 positions.
+//
+// If BreakOnColon is set, each input line ending in a colon ':' is written
+// separately.
+//
+// The Writer assumes that all Unicode code points have the same width; this
+// may not be true in some fonts.
+type Writer struct {
+ w io.Writer
+ buf []byte
+ width int
+ flag uint
+}
+
+// NewWriter allocates and initializes a new Writer writing to w.
+// Parameter width controls the total number of characters on each line
+// across all columns.
+func NewWriter(w io.Writer, width int, flag uint) *Writer {
+ return &Writer{
+ w: w,
+ width: width,
+ flag: flag,
+ }
+}
+
+// Write writes p to the writer w. The only errors returned are ones
+// encountered while writing to the underlying output stream.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ var linelen int
+ var lastWasColon bool
+ for i, c := range p {
+ w.buf = append(w.buf, c)
+ linelen++
+ if c == '\t' {
+ w.buf[len(w.buf)-1] = ' '
+ for linelen%tab != 0 {
+ w.buf = append(w.buf, ' ')
+ linelen++
+ }
+ }
+ if w.flag&BreakOnColon != 0 && c == ':' {
+ lastWasColon = true
+ } else if lastWasColon {
+ if c == '\n' {
+ pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'})
+ if pos < 0 {
+ pos = 0
+ }
+ line := w.buf[pos:]
+ w.buf = w.buf[:pos]
+ if err = w.columnate(); err != nil {
+ if len(line) < i {
+ return i - len(line), err
+ }
+ return 0, err
+ }
+ if n, err := w.w.Write(line); err != nil {
+ if r := len(line) - n; r < i {
+ return i - r, err
+ }
+ return 0, err
+ }
+ }
+ lastWasColon = false
+ }
+ if c == '\n' {
+ linelen = 0
+ }
+ }
+ return len(p), nil
+}
+
+// Flush should be called after the last call to Write to ensure that any data
+// buffered in the Writer is written to output.
+func (w *Writer) Flush() error {
+ return w.columnate()
+}
+
+func (w *Writer) columnate() error {
+ words := bytes.Split(w.buf, []byte{'\n'})
+ w.buf = nil
+ if len(words[len(words)-1]) == 0 {
+ words = words[:len(words)-1]
+ }
+ maxwidth := 0
+ for _, wd := range words {
+ if n := utf8.RuneCount(wd); n > maxwidth {
+ maxwidth = n
+ }
+ }
+ maxwidth++ // space char
+ wordsPerLine := w.width / maxwidth
+ if wordsPerLine <= 0 {
+ wordsPerLine = 1
+ }
+ nlines := (len(words) + wordsPerLine - 1) / wordsPerLine
+ for i := 0; i < nlines; i++ {
+ col := 0
+ endcol := 0
+ for j := i; j < len(words); j += nlines {
+ endcol += maxwidth
+ _, err := w.w.Write(words[j])
+ if err != nil {
+ return err
+ }
+ col += utf8.RuneCount(words[j])
+ if j+nlines < len(words) {
+ for col < endcol {
+ _, err := w.w.Write([]byte{' '})
+ if err != nil {
+ return err
+ }
+ col++
+ }
+ }
+ }
+ _, err := w.w.Write([]byte{'\n'})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/kr/text/colwriter/column_test.go b/vendor/github.com/kr/text/colwriter/column_test.go
new file mode 100644
index 0000000000..ce388f5a26
--- /dev/null
+++ b/vendor/github.com/kr/text/colwriter/column_test.go
@@ -0,0 +1,90 @@
+package colwriter
+
+import (
+ "bytes"
+ "testing"
+)
+
+var src = `
+.git
+.gitignore
+.godir
+Procfile:
+README.md
+api.go
+apps.go
+auth.go
+darwin.go
+data.go
+dyno.go:
+env.go
+git.go
+help.go
+hkdist
+linux.go
+ls.go
+main.go
+plugin.go
+run.go
+scale.go
+ssh.go
+tail.go
+term
+unix.go
+update.go
+version.go
+windows.go
+`[1:]
+
+var tests = []struct {
+ wid int
+ flag uint
+ src string
+ want string
+}{
+ {80, 0, "", ""},
+ {80, 0, src, `
+.git README.md darwin.go git.go ls.go scale.go unix.go
+.gitignore api.go data.go help.go main.go ssh.go update.go
+.godir apps.go dyno.go: hkdist plugin.go tail.go version.go
+Procfile: auth.go env.go linux.go run.go term windows.go
+`[1:]},
+ {80, BreakOnColon, src, `
+.git .gitignore .godir
+
+Procfile:
+README.md api.go apps.go auth.go darwin.go data.go
+
+dyno.go:
+env.go hkdist main.go scale.go term version.go
+git.go linux.go plugin.go ssh.go unix.go windows.go
+help.go ls.go run.go tail.go update.go
+`[1:]},
+ {20, 0, `
+Hello
+Γειά σου
+안녕
+今日は
+`[1:], `
+Hello 안녕
+Γειά σου 今日は
+`[1:]},
+}
+
+func TestWriter(t *testing.T) {
+ for _, test := range tests {
+ b := new(bytes.Buffer)
+ w := NewWriter(b, test.wid, test.flag)
+ if _, err := w.Write([]byte(test.src)); err != nil {
+ t.Error(err)
+ }
+ if err := w.Flush(); err != nil {
+ t.Error(err)
+ }
+ if g := b.String(); test.want != g {
+ t.Log("\n" + test.want)
+ t.Log("\n" + g)
+ t.Errorf("%q != %q", test.want, g)
+ }
+ }
+}
diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go
new file mode 100644
index 0000000000..cf4c198f95
--- /dev/null
+++ b/vendor/github.com/kr/text/doc.go
@@ -0,0 +1,3 @@
+// Package text provides rudimentary functions for manipulating text in
+// paragraphs.
+package text
diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go
new file mode 100644
index 0000000000..4ebac45c09
--- /dev/null
+++ b/vendor/github.com/kr/text/indent.go
@@ -0,0 +1,74 @@
+package text
+
+import (
+ "io"
+)
+
+// Indent inserts prefix at the beginning of each non-empty line of s. The
+// end-of-line marker is NL.
+func Indent(s, prefix string) string {
+ return string(IndentBytes([]byte(s), []byte(prefix)))
+}
+
+// IndentBytes inserts prefix at the beginning of each non-empty line of b.
+// The end-of-line marker is NL.
+func IndentBytes(b, prefix []byte) []byte {
+ var res []byte
+ bol := true
+ for _, c := range b {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// Writer indents each line of its input.
+type indentWriter struct {
+ w io.Writer
+ bol bool
+ pre [][]byte
+ sel int
+ off int
+}
+
+// NewIndentWriter makes a new write filter that indents the input
+// lines. Each line is prefixed in order with the corresponding
+// element of pre. If there are more lines than elements, the last
+// element of pre is repeated for each subsequent line.
+func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
+ return &indentWriter{
+ w: w,
+ pre: pre,
+ bol: true,
+ }
+}
+
+// The only errors returned are from the underlying indentWriter.
+func (w *indentWriter) Write(p []byte) (n int, err error) {
+ for _, c := range p {
+ if w.bol {
+ var i int
+ i, err = w.w.Write(w.pre[w.sel][w.off:])
+ w.off += i
+ if err != nil {
+ return n, err
+ }
+ }
+ _, err = w.w.Write([]byte{c})
+ if err != nil {
+ return n, err
+ }
+ n++
+ w.bol = c == '\n'
+ if w.bol {
+ w.off = 0
+ if w.sel < len(w.pre)-1 {
+ w.sel++
+ }
+ }
+ }
+ return n, nil
+}
diff --git a/vendor/github.com/kr/text/indent_test.go b/vendor/github.com/kr/text/indent_test.go
new file mode 100644
index 0000000000..5c723eee85
--- /dev/null
+++ b/vendor/github.com/kr/text/indent_test.go
@@ -0,0 +1,119 @@
+package text
+
+import (
+ "bytes"
+ "testing"
+)
+
+type T struct {
+ inp, exp, pre string
+}
+
+var tests = []T{
+ {
+ "The quick brown fox\njumps over the lazy\ndog.\nBut not quickly.\n",
+ "xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\nxxxBut not quickly.\n",
+ "xxx",
+ },
+ {
+ "The quick brown fox\njumps over the lazy\ndog.\n\nBut not quickly.",
+ "xxxThe quick brown fox\nxxxjumps over the lazy\nxxxdog.\n\nxxxBut not quickly.",
+ "xxx",
+ },
+}
+
+func TestIndent(t *testing.T) {
+ for _, test := range tests {
+ got := Indent(test.inp, test.pre)
+ if got != test.exp {
+ t.Errorf("mismatch %q != %q", got, test.exp)
+ }
+ }
+}
+
+type IndentWriterTest struct {
+ inp, exp string
+ pre []string
+}
+
+var ts = []IndentWriterTest{
+ {
+ `
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+ `
+xxxThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxxBut not quickly.
+`[1:],
+ []string{"xxx"},
+ },
+ {
+ `
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+ `
+xxaThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxxBut not quickly.
+`[1:],
+ []string{"xxa", "xxx"},
+ },
+ {
+ `
+The quick brown fox
+jumps over the lazy
+dog.
+But not quickly.
+`[1:],
+ `
+xxaThe quick brown fox
+xxbjumps over the lazy
+xxcdog.
+xxxBut not quickly.
+`[1:],
+ []string{"xxa", "xxb", "xxc", "xxx"},
+ },
+ {
+ `
+The quick brown fox
+jumps over the lazy
+dog.
+
+But not quickly.`[1:],
+ `
+xxaThe quick brown fox
+xxxjumps over the lazy
+xxxdog.
+xxx
+xxxBut not quickly.`[1:],
+ []string{"xxa", "xxx"},
+ },
+}
+
+func TestIndentWriter(t *testing.T) {
+ for _, test := range ts {
+ b := new(bytes.Buffer)
+ pre := make([][]byte, len(test.pre))
+ for i := range test.pre {
+ pre[i] = []byte(test.pre[i])
+ }
+ w := NewIndentWriter(b, pre...)
+ if _, err := w.Write([]byte(test.inp)); err != nil {
+ t.Error(err)
+ }
+ if got := b.String(); got != test.exp {
+ t.Errorf("mismatch %q != %q", got, test.exp)
+ t.Log(got)
+ t.Log(test.exp)
+ }
+ }
+}
diff --git a/vendor/github.com/kr/text/mc/Readme b/vendor/github.com/kr/text/mc/Readme
new file mode 100644
index 0000000000..519ddc00a1
--- /dev/null
+++ b/vendor/github.com/kr/text/mc/Readme
@@ -0,0 +1,9 @@
+Command mc prints in multiple columns.
+
+ Usage: mc [-] [-N] [file...]
+
+Mc splits the input into as many columns as will fit in N
+print positions. If the output is a tty, the default N is
+the number of characters in a terminal line; otherwise the
+default N is 80. Under option - each input line ending in
+a colon ':' is printed separately.
diff --git a/vendor/github.com/kr/text/mc/mc.go b/vendor/github.com/kr/text/mc/mc.go
new file mode 100644
index 0000000000..00169a30f1
--- /dev/null
+++ b/vendor/github.com/kr/text/mc/mc.go
@@ -0,0 +1,62 @@
+// Command mc prints in multiple columns.
+//
+// Usage: mc [-] [-N] [file...]
+//
+// Mc splits the input into as many columns as will fit in N
+// print positions. If the output is a tty, the default N is
+// the number of characters in a terminal line; otherwise the
+// default N is 80. Under option - each input line ending in
+// a colon ':' is printed separately.
+package main
+
+import (
+ "github.com/kr/pty"
+ "github.com/kr/text/colwriter"
+ "io"
+ "log"
+ "os"
+ "strconv"
+)
+
+func main() {
+ var width int
+ var flag uint
+ args := os.Args[1:]
+ for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' {
+ if len(args[0]) > 1 {
+ width, _ = strconv.Atoi(args[0][1:])
+ } else {
+ flag |= colwriter.BreakOnColon
+ }
+ args = args[1:]
+ }
+ if width < 1 {
+ _, width, _ = pty.Getsize(os.Stdout)
+ }
+ if width < 1 {
+ width = 80
+ }
+
+ w := colwriter.NewWriter(os.Stdout, width, flag)
+ if len(args) > 0 {
+ for _, s := range args {
+ if f, err := os.Open(s); err == nil {
+ copyin(w, f)
+ f.Close()
+ } else {
+ log.Println(err)
+ }
+ }
+ } else {
+ copyin(w, os.Stdin)
+ }
+}
+
+func copyin(w *colwriter.Writer, r io.Reader) {
+ if _, err := io.Copy(w, r); err != nil {
+ log.Println(err)
+ }
+ if err := w.Flush(); err != nil {
+ log.Println(err)
+ }
+}
diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go
new file mode 100644
index 0000000000..b09bb03736
--- /dev/null
+++ b/vendor/github.com/kr/text/wrap.go
@@ -0,0 +1,86 @@
+package text
+
+import (
+ "bytes"
+ "math"
+)
+
+var (
+ nl = []byte{'\n'}
+ sp = []byte{' '}
+)
+
+const defaultPenalty = 1e5
+
+// Wrap wraps s into a paragraph of lines of length lim, with minimal
+// raggedness.
+func Wrap(s string, lim int) string {
+ return string(WrapBytes([]byte(s), lim))
+}
+
+// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
+// raggedness.
+func WrapBytes(b []byte, lim int) []byte {
+ words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
+ var lines [][]byte
+ for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
+ lines = append(lines, bytes.Join(line, sp))
+ }
+ return bytes.Join(lines, nl)
+}
+
+// WrapWords is the low-level line-breaking algorithm, useful if you need more
+// control over the details of the text wrapping process. For most uses, either
+// Wrap or WrapBytes will be sufficient and more convenient.
+//
+// WrapWords splits a list of words into lines with minimal "raggedness",
+// treating each byte as one unit, accounting for spc units between adjacent
+// words on each line, and attempting to limit lines to lim units. Raggedness
+// is the total error over all lines, where error is the square of the
+// difference of the length of the line and lim. Too-long lines (which only
+// happen when a single word is longer than lim units) have pen penalty units
+// added to the error.
+func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
+ n := len(words)
+
+ length := make([][]int, n)
+ for i := 0; i < n; i++ {
+ length[i] = make([]int, n)
+ length[i][i] = len(words[i])
+ for j := i + 1; j < n; j++ {
+ length[i][j] = length[i][j-1] + spc + len(words[j])
+ }
+ }
+
+ nbrk := make([]int, n)
+ cost := make([]int, n)
+ for i := range cost {
+ cost[i] = math.MaxInt32
+ }
+ for i := n - 1; i >= 0; i-- {
+ if length[i][n-1] <= lim || i == n-1 {
+ cost[i] = 0
+ nbrk[i] = n
+ } else {
+ for j := i + 1; j < n; j++ {
+ d := lim - length[i][j-1]
+ c := d*d + cost[j]
+ if length[i][j-1] > lim {
+ c += pen // too-long lines get a worse penalty
+ }
+ if c < cost[i] {
+ cost[i] = c
+ nbrk[i] = j
+ }
+ }
+ }
+ }
+
+ var lines [][][]byte
+ i := 0
+ for i < n {
+ lines = append(lines, words[i:nbrk[i]])
+ i = nbrk[i]
+ }
+ return lines
+}
diff --git a/vendor/github.com/kr/text/wrap_test.go b/vendor/github.com/kr/text/wrap_test.go
new file mode 100644
index 0000000000..634b6e8ebb
--- /dev/null
+++ b/vendor/github.com/kr/text/wrap_test.go
@@ -0,0 +1,62 @@
+package text
+
+import (
+ "bytes"
+ "testing"
+)
+
+var text = "The quick brown fox jumps over the lazy dog."
+
+func TestWrap(t *testing.T) {
+ exp := [][]string{
+ {"The", "quick", "brown", "fox"},
+ {"jumps", "over", "the", "lazy", "dog."},
+ }
+ words := bytes.Split([]byte(text), sp)
+ got := WrapWords(words, 1, 24, defaultPenalty)
+ if len(exp) != len(got) {
+ t.Fail()
+ }
+ for i := range exp {
+ if len(exp[i]) != len(got[i]) {
+ t.Fail()
+ }
+ for j := range exp[i] {
+ if exp[i][j] != string(got[i][j]) {
+ t.Fatal(i, exp[i][j], got[i][j])
+ }
+ }
+ }
+}
+
+func TestWrapNarrow(t *testing.T) {
+ exp := "The\nquick\nbrown\nfox\njumps\nover\nthe\nlazy\ndog."
+ if Wrap(text, 5) != exp {
+ t.Fail()
+ }
+}
+
+func TestWrapOneLine(t *testing.T) {
+ exp := "The quick brown fox jumps over the lazy dog."
+ if Wrap(text, 500) != exp {
+ t.Fail()
+ }
+}
+
+func TestWrapBug1(t *testing.T) {
+ cases := []struct {
+ limit int
+ text string
+ want string
+ }{
+ {4, "aaaaa", "aaaaa"},
+ {4, "a aaaaa", "a\naaaaa"},
+ }
+
+ for _, test := range cases {
+ got := Wrap(test.text, test.limit)
+ if got != test.want {
+ t.Errorf("Wrap(%q, %d) = %q want %q", test.text, test.limit, got, test.want)
+ }
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore
new file mode 100644
index 0000000000..7054822dc9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/.gitignore
@@ -0,0 +1,4 @@
+*.sublime-project
+*.sublime-workspace
+*.un~
+*.swp
diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml
new file mode 100644
index 0000000000..5ef5d72f3b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+go:
+ - 1.4.3
+ - 1.5.3
+ - 1.6
+ - tip
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
new file mode 100644
index 0000000000..bf49a1376f
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -0,0 +1,81 @@
+## Changelog
+
+### [1.7.0](https://github.com/magiconair/properties/tags/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#Properties.LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#Properties.LoadString) method to load properties from an UTF8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tags/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tags/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tags/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tags/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tags/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tags/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tags/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
+ [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tags/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tags/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tags/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tags/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tags/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tags/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tags/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+ filenames and value expressions
+* Fixed bug where value expressions were not at the
+ start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tags/v1.0.0) - 7 Jan 2014
+
+* Initial release
diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE
new file mode 100644
index 0000000000..7eab43b6bf
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/LICENSE
@@ -0,0 +1,25 @@
+goproperties - properties file decoder for Go
+
+Copyright (c) 2013-2014 - Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
new file mode 100644
index 0000000000..1ae0035a0d
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -0,0 +1,81 @@
+Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties)
+========
+
+#### Current version: 1.7.0
+
+properties is a Go library for reading and writing properties files.
+
+It supports reading from multiple files or URLs and Spring style recursive
+property expansion of expressions like `${key}` to their corresponding value.
+Value expressions can refer to other keys like in `${key}` or to environment
+variables like in `${USER}`. Filenames can also contain environment variables
+like in `/home/${USER}/myapp.properties`.
+
+Properties can be decoded into structs, maps, arrays and values through
+struct tags.
+
+Comments and the order of keys are preserved. Comments can be modified
+and can be written to the output.
+
+The properties library supports both ISO-8859-1 and UTF-8 encoded data.
+
+Starting from version 1.3.0 the behavior of the MustXXX() functions is
+configurable by providing a custom `ErrorHandler` function. The default has
+changed from `panic` to `log.Fatal` but this is configurable and custom
+error handling functions can be provided. See the package documentation for
+details.
+
+Getting Started
+---------------
+
+```go
+import (
+ "flag"
+ "github.com/magiconair/properties"
+)
+
+func main() {
+ p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
+
+ // via getters
+ host := p.MustGetString("host")
+ port := p.GetInt("port", 8080)
+
+ // or via decode
+ type Config struct {
+ Host string `properties:"host"`
+ Port int `properties:"port,default=9000"`
+ Accept []string `properties:"accept,default=image/png;image/gif"`
+ Timeout time.Duration `properties:"timeout,default=5s"`
+ }
+ var cfg Config
+ if err := p.Decode(&cfg); err != nil {
+ log.Fatal(err)
+ }
+
+ // or via flags
+ p.MustFlag(flag.CommandLine)
+
+ // or via url
+ p = properties.MustLoadURL("http://host/path")
+}
+
+```
+
+Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
+
+Installation and Upgrade
+------------------------
+
+```
+$ go get -u github.com/magiconair/properties
+```
+
+License
+-------
+
+2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
+
+ToDo
+----
+* Dump contents with passwords and secrets obscured
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE
new file mode 100644
index 0000000000..545cf2d331
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE
@@ -0,0 +1,25 @@
+Gocheck - A rich testing framework for Go
+
+Copyright (c) 2010-2013 Gustavo Niemeyer
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/README.md b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/README.md
new file mode 100644
index 0000000000..0ca9e57260
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/README.md
@@ -0,0 +1,20 @@
+Instructions
+============
+
+Install the package with:
+
+ go get gopkg.in/check.v1
+
+Import it with:
+
+ import "gopkg.in/check.v1"
+
+and use _check_ as the package name inside the code.
+
+For more details, visit the project page:
+
+* http://labix.org/gocheck
+
+and the API documentation:
+
+* https://gopkg.in/check.v1
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/TODO b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/TODO
new file mode 100644
index 0000000000..33498270ea
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/TODO
@@ -0,0 +1,2 @@
+- Assert(slice, Contains, item)
+- Parallel test support
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark.go
new file mode 100644
index 0000000000..48cb8c8114
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package check
+
+import (
+ "fmt"
+ "runtime"
+ "time"
+)
+
+var memStats runtime.MemStats
+
+// testingB is a type passed to Benchmark functions to manage benchmark
+// timing and to specify the number of iterations to run.
+type timer struct {
+ start time.Time // Time test or benchmark started
+ duration time.Duration
+ N int
+ bytes int64
+ timerOn bool
+ benchTime time.Duration
+ // The initial states of memStats.Mallocs and memStats.TotalAlloc.
+ startAllocs uint64
+ startBytes uint64
+ // The net total of this test after being run.
+ netAllocs uint64
+ netBytes uint64
+}
+
+// StartTimer starts timing a test. This function is called automatically
+// before a benchmark starts, but it can also used to resume timing after
+// a call to StopTimer.
+func (c *C) StartTimer() {
+ if !c.timerOn {
+ c.start = time.Now()
+ c.timerOn = true
+
+ runtime.ReadMemStats(&memStats)
+ c.startAllocs = memStats.Mallocs
+ c.startBytes = memStats.TotalAlloc
+ }
+}
+
+// StopTimer stops timing a test. This can be used to pause the timer
+// while performing complex initialization that you don't
+// want to measure.
+func (c *C) StopTimer() {
+ if c.timerOn {
+ c.duration += time.Now().Sub(c.start)
+ c.timerOn = false
+ runtime.ReadMemStats(&memStats)
+ c.netAllocs += memStats.Mallocs - c.startAllocs
+ c.netBytes += memStats.TotalAlloc - c.startBytes
+ }
+}
+
+// ResetTimer sets the elapsed benchmark time to zero.
+// It does not affect whether the timer is running.
+func (c *C) ResetTimer() {
+ if c.timerOn {
+ c.start = time.Now()
+ runtime.ReadMemStats(&memStats)
+ c.startAllocs = memStats.Mallocs
+ c.startBytes = memStats.TotalAlloc
+ }
+ c.duration = 0
+ c.netAllocs = 0
+ c.netBytes = 0
+}
+
+// SetBytes informs the number of bytes that the benchmark processes
+// on each iteration. If this is called in a benchmark it will also
+// report MB/s.
+func (c *C) SetBytes(n int64) {
+ c.bytes = n
+}
+
+func (c *C) nsPerOp() int64 {
+ if c.N <= 0 {
+ return 0
+ }
+ return c.duration.Nanoseconds() / int64(c.N)
+}
+
+func (c *C) mbPerSec() float64 {
+ if c.bytes <= 0 || c.duration <= 0 || c.N <= 0 {
+ return 0
+ }
+ return (float64(c.bytes) * float64(c.N) / 1e6) / c.duration.Seconds()
+}
+
+func (c *C) timerString() string {
+ if c.N <= 0 {
+ return fmt.Sprintf("%3.3fs", float64(c.duration.Nanoseconds())/1e9)
+ }
+ mbs := c.mbPerSec()
+ mb := ""
+ if mbs != 0 {
+ mb = fmt.Sprintf("\t%7.2f MB/s", mbs)
+ }
+ nsop := c.nsPerOp()
+ ns := fmt.Sprintf("%10d ns/op", nsop)
+ if c.N > 0 && nsop < 100 {
+ // The format specifiers here make sure that
+ // the ones digits line up for all three possible formats.
+ if nsop < 10 {
+ ns = fmt.Sprintf("%13.2f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+ } else {
+ ns = fmt.Sprintf("%12.1f ns/op", float64(c.duration.Nanoseconds())/float64(c.N))
+ }
+ }
+ memStats := ""
+ if c.benchMem {
+ allocedBytes := fmt.Sprintf("%8d B/op", int64(c.netBytes)/int64(c.N))
+ allocs := fmt.Sprintf("%8d allocs/op", int64(c.netAllocs)/int64(c.N))
+ memStats = fmt.Sprintf("\t%s\t%s", allocedBytes, allocs)
+ }
+ return fmt.Sprintf("%8d\t%s%s%s", c.N, ns, mb, memStats)
+}
+
+func min(x, y int) int {
+ if x > y {
+ return y
+ }
+ return x
+}
+
+func max(x, y int) int {
+ if x < y {
+ return y
+ }
+ return x
+}
+
+// roundDown10 rounds a number down to the nearest power of 10.
+func roundDown10(n int) int {
+ var tens = 0
+ // tens = floor(log_10(n))
+ for n > 10 {
+ n = n / 10
+ tens++
+ }
+ // result = 10^tens
+ result := 1
+ for i := 0; i < tens; i++ {
+ result *= 10
+ }
+ return result
+}
+
+// roundUp rounds x up to a number of the form [1eX, 2eX, 5eX].
+func roundUp(n int) int {
+ base := roundDown10(n)
+ if n < (2 * base) {
+ return 2 * base
+ }
+ if n < (5 * base) {
+ return 5 * base
+ }
+ return 10 * base
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark_test.go
new file mode 100644
index 0000000000..a7cfd53f97
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/benchmark_test.go
@@ -0,0 +1,91 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "time"
+)
+
+var benchmarkS = Suite(&BenchmarkS{})
+
+type BenchmarkS struct{}
+
+func (s *BenchmarkS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+func (s *BenchmarkS) TestBasicTestTiming(c *C) {
+ helper := FixtureHelper{sleepOn: "Test1", sleep: 1000000 * time.Nanosecond}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t0\\.001s\n" +
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t0\\.000s\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestStreamTestTiming(c *C) {
+ helper := FixtureHelper{sleepOn: "SetUpSuite", sleep: 1000000 * time.Nanosecond}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(&helper, &runConf)
+
+ expected := "(?s).*\nPASS: check_test\\.go:[0-9]+: FixtureHelper\\.SetUpSuite\t *0\\.001s\n.*"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmark(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark1",
+ }
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Benchmark1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Benchmark1")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ // ... and more.
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark1\t *100\t *[12][0-9]{5} ns/op\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkBytes(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark2",
+ }
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark2\t *100\t *[12][0-9]{5} ns/op\t *[4-9]\\.[0-9]{2} MB/s\n"
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *BenchmarkS) TestBenchmarkMem(c *C) {
+ helper := FixtureHelper{sleep: 100000}
+ output := String{}
+ runConf := RunConf{
+ Output: &output,
+ Benchmark: true,
+ BenchmarkMem: true,
+ BenchmarkTime: 10000000,
+ Filter: "Benchmark3",
+ }
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Benchmark3\t *100\t *[12][0-9]{5} ns/op\t *[0-9]+ B/op\t *[1-9] allocs/op\n"
+ c.Assert(output.value, Matches, expected)
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/bootstrap_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/bootstrap_test.go
new file mode 100644
index 0000000000..e5cee20ec7
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/bootstrap_test.go
@@ -0,0 +1,82 @@
+// These initial tests are for bootstrapping. They verify that we can
+// basically use the testing infrastructure itself to check if the test
+// system is working.
+//
+// These tests use will break down the test runner badly in case of
+// errors because if they simply fail, we can't be sure the developer
+// will ever see anything (because failing means the failing system
+// somehow isn't working! :-)
+//
+// Do not assume *any* internal functionality works as expected besides
+// what's actually tested here.
+
+package check_test
+
+import (
+ "fmt"
+ "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "strings"
+)
+
+type BootstrapS struct{}
+
+var boostrapS = check.Suite(&BootstrapS{})
+
+func (s *BootstrapS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *BootstrapS) TestFailedAndFail(c *check.C) {
+ if c.Failed() {
+ critical("c.Failed() must be false first!")
+ }
+ c.Fail()
+ if !c.Failed() {
+ critical("c.Fail() didn't put the test in a failed state!")
+ }
+ c.Succeed()
+}
+
+func (s *BootstrapS) TestFailedAndSucceed(c *check.C) {
+ c.Fail()
+ c.Succeed()
+ if c.Failed() {
+ critical("c.Succeed() didn't put the test back in a non-failed state")
+ }
+}
+
+func (s *BootstrapS) TestLogAndGetTestLog(c *check.C) {
+ c.Log("Hello there!")
+ log := c.GetTestLog()
+ if log != "Hello there!\n" {
+ critical(fmt.Sprintf("Log() or GetTestLog() is not working! Got: %#v", log))
+ }
+}
+
+func (s *BootstrapS) TestLogfAndGetTestLog(c *check.C) {
+ c.Logf("Hello %v", "there!")
+ log := c.GetTestLog()
+ if log != "Hello there!\n" {
+ critical(fmt.Sprintf("Logf() or GetTestLog() is not working! Got: %#v", log))
+ }
+}
+
+func (s *BootstrapS) TestRunShowsErrors(c *check.C) {
+ output := String{}
+ check.Run(&FailHelper{}, &check.RunConf{Output: &output})
+ if strings.Index(output.value, "Expected failure!") == -1 {
+ critical(fmt.Sprintf("RunWithWriter() output did not contain the "+
+ "expected failure! Got: %#v",
+ output.value))
+ }
+}
+
+func (s *BootstrapS) TestRunDoesntShowSuccesses(c *check.C) {
+ output := String{}
+ check.Run(&SuccessHelper{}, &check.RunConf{Output: &output})
+ if strings.Index(output.value, "Expected success!") != -1 {
+ critical(fmt.Sprintf("RunWithWriter() output contained a successful "+
+ "test! Got: %#v",
+ output.value))
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check.go
new file mode 100644
index 0000000000..ca8c0f92de
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check.go
@@ -0,0 +1,945 @@
+// Package check is a rich testing extension for Go's testing package.
+//
+// For details about the project, see:
+//
+// http://labix.org/gocheck
+//
+package check
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// -----------------------------------------------------------------------
+// Internal type which deals with suite method calling.
+
+const (
+ fixtureKd = iota
+ testKd
+)
+
+type funcKind int
+
+const (
+ succeededSt = iota
+ failedSt
+ skippedSt
+ panickedSt
+ fixturePanickedSt
+ missedSt
+)
+
+type funcStatus int
+
+// A method value can't reach its own Method structure.
+type methodType struct {
+ reflect.Value
+ Info reflect.Method
+}
+
+func newMethod(receiver reflect.Value, i int) *methodType {
+ return &methodType{receiver.Method(i), receiver.Type().Method(i)}
+}
+
+func (method *methodType) PC() uintptr {
+ return method.Info.Func.Pointer()
+}
+
+func (method *methodType) suiteName() string {
+ t := method.Info.Type.In(0)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ return t.Name()
+}
+
+func (method *methodType) String() string {
+ return method.suiteName() + "." + method.Info.Name
+}
+
+func (method *methodType) matches(re *regexp.Regexp) bool {
+ return (re.MatchString(method.Info.Name) ||
+ re.MatchString(method.suiteName()) ||
+ re.MatchString(method.String()))
+}
+
+type C struct {
+ method *methodType
+ kind funcKind
+ testName string
+ status funcStatus
+ logb *logger
+ logw io.Writer
+ done chan *C
+ reason string
+ mustFail bool
+ tempDir *tempDir
+ benchMem bool
+ startTime time.Time
+ timer
+}
+
+func (c *C) stopNow() {
+ runtime.Goexit()
+}
+
+// logger is a concurrency safe byte.Buffer
+type logger struct {
+ sync.Mutex
+ writer bytes.Buffer
+}
+
+func (l *logger) Write(buf []byte) (int, error) {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.Write(buf)
+}
+
+func (l *logger) WriteTo(w io.Writer) (int64, error) {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.WriteTo(w)
+}
+
+func (l *logger) String() string {
+ l.Lock()
+ defer l.Unlock()
+ return l.writer.String()
+}
+
+// -----------------------------------------------------------------------
+// Handling of temporary files and directories.
+
+type tempDir struct {
+ sync.Mutex
+ path string
+ counter int
+}
+
+func (td *tempDir) newPath() string {
+ td.Lock()
+ defer td.Unlock()
+ if td.path == "" {
+ var err error
+ for i := 0; i != 100; i++ {
+ path := fmt.Sprintf("%s%ccheck-%d", os.TempDir(), os.PathSeparator, rand.Int())
+ if err = os.Mkdir(path, 0700); err == nil {
+ td.path = path
+ break
+ }
+ }
+ if td.path == "" {
+ panic("Couldn't create temporary directory: " + err.Error())
+ }
+ }
+ result := filepath.Join(td.path, strconv.Itoa(td.counter))
+ td.counter += 1
+ return result
+}
+
+func (td *tempDir) removeAll() {
+ td.Lock()
+ defer td.Unlock()
+ if td.path != "" {
+ err := os.RemoveAll(td.path)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "WARNING: Error cleaning up temporaries: "+err.Error())
+ }
+ }
+}
+
+// Create a new temporary directory which is automatically removed after
+// the suite finishes running.
+func (c *C) MkDir() string {
+ path := c.tempDir.newPath()
+ if err := os.Mkdir(path, 0700); err != nil {
+ panic(fmt.Sprintf("Couldn't create temporary directory %s: %s", path, err.Error()))
+ }
+ return path
+}
+
+// -----------------------------------------------------------------------
+// Low-level logging functions.
+
+func (c *C) log(args ...interface{}) {
+ c.writeLog([]byte(fmt.Sprint(args...) + "\n"))
+}
+
+func (c *C) logf(format string, args ...interface{}) {
+ c.writeLog([]byte(fmt.Sprintf(format+"\n", args...)))
+}
+
+func (c *C) logNewLine() {
+ c.writeLog([]byte{'\n'})
+}
+
+func (c *C) writeLog(buf []byte) {
+ c.logb.Write(buf)
+ if c.logw != nil {
+ c.logw.Write(buf)
+ }
+}
+
+func hasStringOrError(x interface{}) (ok bool) {
+ _, ok = x.(fmt.Stringer)
+ if ok {
+ return
+ }
+ _, ok = x.(error)
+ return
+}
+
+func (c *C) logValue(label string, value interface{}) {
+ if label == "" {
+ if hasStringOrError(value) {
+ c.logf("... %#v (%q)", value, value)
+ } else {
+ c.logf("... %#v", value)
+ }
+ } else if value == nil {
+ c.logf("... %s = nil", label)
+ } else {
+ if hasStringOrError(value) {
+ fv := fmt.Sprintf("%#v", value)
+ qv := fmt.Sprintf("%q", value)
+ if fv != qv {
+ c.logf("... %s %s = %s (%s)", label, reflect.TypeOf(value), fv, qv)
+ return
+ }
+ }
+ if s, ok := value.(string); ok && isMultiLine(s) {
+ c.logf(`... %s %s = "" +`, label, reflect.TypeOf(value))
+ c.logMultiLine(s)
+ } else {
+ c.logf("... %s %s = %#v", label, reflect.TypeOf(value), value)
+ }
+ }
+}
+
+func (c *C) logMultiLine(s string) {
+ b := make([]byte, 0, len(s)*2)
+ i := 0
+ n := len(s)
+ for i < n {
+ j := i + 1
+ for j < n && s[j-1] != '\n' {
+ j++
+ }
+ b = append(b, "... "...)
+ b = strconv.AppendQuote(b, s[i:j])
+ if j < n {
+ b = append(b, " +"...)
+ }
+ b = append(b, '\n')
+ i = j
+ }
+ c.writeLog(b)
+}
+
+func isMultiLine(s string) bool {
+ for i := 0; i+1 < len(s); i++ {
+ if s[i] == '\n' {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *C) logString(issue string) {
+ c.log("... ", issue)
+}
+
+func (c *C) logCaller(skip int) {
+ // This is a bit heavier than it ought to be.
+ skip += 1 // Our own frame.
+ pc, callerFile, callerLine, ok := runtime.Caller(skip)
+ if !ok {
+ return
+ }
+ var testFile string
+ var testLine int
+ testFunc := runtime.FuncForPC(c.method.PC())
+ if runtime.FuncForPC(pc) != testFunc {
+ for {
+ skip += 1
+ if pc, file, line, ok := runtime.Caller(skip); ok {
+ // Note that the test line may be different on
+ // distinct calls for the same test. Showing
+ // the "internal" line is helpful when debugging.
+ if runtime.FuncForPC(pc) == testFunc {
+ testFile, testLine = file, line
+ break
+ }
+ } else {
+ break
+ }
+ }
+ }
+ if testFile != "" && (testFile != callerFile || testLine != callerLine) {
+ c.logCode(testFile, testLine)
+ }
+ c.logCode(callerFile, callerLine)
+}
+
+func (c *C) logCode(path string, line int) {
+ c.logf("%s:%d:", nicePath(path), line)
+ code, err := printLine(path, line)
+ if code == "" {
+ code = "..." // XXX Open the file and take the raw line.
+ if err != nil {
+ code += err.Error()
+ }
+ }
+ c.log(indent(code, " "))
+}
+
+var valueGo = filepath.Join("reflect", "value.go")
+var asmGo = filepath.Join("runtime", "asm_")
+
+func (c *C) logPanic(skip int, value interface{}) {
+ skip++ // Our own frame.
+ initialSkip := skip
+ for ; ; skip++ {
+ if pc, file, line, ok := runtime.Caller(skip); ok {
+ if skip == initialSkip {
+ c.logf("... Panic: %s (PC=0x%X)\n", value, pc)
+ }
+ name := niceFuncName(pc)
+ path := nicePath(file)
+ if strings.Contains(path, "/gopkg.in/check.v") {
+ continue
+ }
+ if name == "Value.call" && strings.HasSuffix(path, valueGo) {
+ continue
+ }
+ if name == "call16" && strings.Contains(path, asmGo) {
+ continue
+ }
+ c.logf("%s:%d\n in %s", nicePath(file), line, name)
+ } else {
+ break
+ }
+ }
+}
+
+func (c *C) logSoftPanic(issue string) {
+ c.log("... Panic: ", issue)
+}
+
+func (c *C) logArgPanic(method *methodType, expectedType string) {
+ c.logf("... Panic: %s argument should be %s",
+ niceFuncName(method.PC()), expectedType)
+}
+
+// -----------------------------------------------------------------------
+// Some simple formatting helpers.
+
+var initWD, initWDErr = os.Getwd()
+
+func init() {
+ if initWDErr == nil {
+ initWD = strings.Replace(initWD, "\\", "/", -1) + "/"
+ }
+}
+
+func nicePath(path string) string {
+ if initWDErr == nil {
+ if strings.HasPrefix(path, initWD) {
+ return path[len(initWD):]
+ }
+ }
+ return path
+}
+
+func niceFuncPath(pc uintptr) string {
+ function := runtime.FuncForPC(pc)
+ if function != nil {
+ filename, line := function.FileLine(pc)
+ return fmt.Sprintf("%s:%d", nicePath(filename), line)
+ }
+ return ""
+}
+
+func niceFuncName(pc uintptr) string {
+ function := runtime.FuncForPC(pc)
+ if function != nil {
+ name := path.Base(function.Name())
+ if i := strings.Index(name, "."); i > 0 {
+ name = name[i+1:]
+ }
+ if strings.HasPrefix(name, "(*") {
+ if i := strings.Index(name, ")"); i > 0 {
+ name = name[2:i] + name[i+1:]
+ }
+ }
+ if i := strings.LastIndex(name, ".*"); i != -1 {
+ name = name[:i] + "." + name[i+2:]
+ }
+ if i := strings.LastIndex(name, "·"); i != -1 {
+ name = name[:i] + "." + name[i+2:]
+ }
+ return name
+ }
+ return ""
+}
+
+// -----------------------------------------------------------------------
+// Result tracker to aggregate call results.
+
+type Result struct {
+ Succeeded int
+ Failed int
+ Skipped int
+ Panicked int
+ FixturePanicked int
+ ExpectedFailures int
+ Missed int // Not even tried to run, related to a panic in the fixture.
+ RunError error // Houston, we've got a problem.
+ WorkDir string // If KeepWorkDir is true
+}
+
+type resultTracker struct {
+ result Result
+ _lastWasProblem bool
+ _waiting int
+ _missed int
+ _expectChan chan *C
+ _doneChan chan *C
+ _stopChan chan bool
+}
+
+func newResultTracker() *resultTracker {
+ return &resultTracker{_expectChan: make(chan *C), // Synchronous
+ _doneChan: make(chan *C, 32), // Asynchronous
+ _stopChan: make(chan bool)} // Synchronous
+}
+
+func (tracker *resultTracker) start() {
+ go tracker._loopRoutine()
+}
+
+func (tracker *resultTracker) waitAndStop() {
+ <-tracker._stopChan
+}
+
+func (tracker *resultTracker) expectCall(c *C) {
+ tracker._expectChan <- c
+}
+
+func (tracker *resultTracker) callDone(c *C) {
+ tracker._doneChan <- c
+}
+
+func (tracker *resultTracker) _loopRoutine() {
+ for {
+ var c *C
+ if tracker._waiting > 0 {
+ // Calls still running. Can't stop.
+ select {
+ // XXX Reindent this (not now to make diff clear)
+ case c = <-tracker._expectChan:
+ tracker._waiting += 1
+ case c = <-tracker._doneChan:
+ tracker._waiting -= 1
+ switch c.status {
+ case succeededSt:
+ if c.kind == testKd {
+ if c.mustFail {
+ tracker.result.ExpectedFailures++
+ } else {
+ tracker.result.Succeeded++
+ }
+ }
+ case failedSt:
+ tracker.result.Failed++
+ case panickedSt:
+ if c.kind == fixtureKd {
+ tracker.result.FixturePanicked++
+ } else {
+ tracker.result.Panicked++
+ }
+ case fixturePanickedSt:
+ // Track it as missed, since the panic
+ // was on the fixture, not on the test.
+ tracker.result.Missed++
+ case missedSt:
+ tracker.result.Missed++
+ case skippedSt:
+ if c.kind == testKd {
+ tracker.result.Skipped++
+ }
+ }
+ }
+ } else {
+ // No calls. Can stop, but no done calls here.
+ select {
+ case tracker._stopChan <- true:
+ return
+ case c = <-tracker._expectChan:
+ tracker._waiting += 1
+ case c = <-tracker._doneChan:
+ panic("Tracker got an unexpected done call.")
+ }
+ }
+ }
+}
+
+// -----------------------------------------------------------------------
+// The underlying suite runner.
+
+type suiteRunner struct {
+ suite interface{}
+ setUpSuite, tearDownSuite *methodType
+ setUpTest, tearDownTest *methodType
+ tests []*methodType
+ tracker *resultTracker
+ tempDir *tempDir
+ keepDir bool
+ output *outputWriter
+ reportedProblemLast bool
+ benchTime time.Duration
+ benchMem bool
+}
+
+type RunConf struct {
+ Output io.Writer
+ Stream bool
+ Verbose bool
+ Filter string
+ Benchmark bool
+ BenchmarkTime time.Duration // Defaults to 1 second
+ BenchmarkMem bool
+ KeepWorkDir bool
+}
+
+// Create a new suiteRunner able to run all methods in the given suite.
+func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
+ var conf RunConf
+ if runConf != nil {
+ conf = *runConf
+ }
+ if conf.Output == nil {
+ conf.Output = os.Stdout
+ }
+ if conf.Benchmark {
+ conf.Verbose = true
+ }
+
+ suiteType := reflect.TypeOf(suite)
+ suiteNumMethods := suiteType.NumMethod()
+ suiteValue := reflect.ValueOf(suite)
+
+ runner := &suiteRunner{
+ suite: suite,
+ output: newOutputWriter(conf.Output, conf.Stream, conf.Verbose),
+ tracker: newResultTracker(),
+ benchTime: conf.BenchmarkTime,
+ benchMem: conf.BenchmarkMem,
+ tempDir: &tempDir{},
+ keepDir: conf.KeepWorkDir,
+ tests: make([]*methodType, 0, suiteNumMethods),
+ }
+ if runner.benchTime == 0 {
+ runner.benchTime = 1 * time.Second
+ }
+
+ var filterRegexp *regexp.Regexp
+ if conf.Filter != "" {
+ if regexp, err := regexp.Compile(conf.Filter); err != nil {
+ msg := "Bad filter expression: " + err.Error()
+ runner.tracker.result.RunError = errors.New(msg)
+ return runner
+ } else {
+ filterRegexp = regexp
+ }
+ }
+
+ for i := 0; i != suiteNumMethods; i++ {
+ method := newMethod(suiteValue, i)
+ switch method.Info.Name {
+ case "SetUpSuite":
+ runner.setUpSuite = method
+ case "TearDownSuite":
+ runner.tearDownSuite = method
+ case "SetUpTest":
+ runner.setUpTest = method
+ case "TearDownTest":
+ runner.tearDownTest = method
+ default:
+ prefix := "Test"
+ if conf.Benchmark {
+ prefix = "Benchmark"
+ }
+ if !strings.HasPrefix(method.Info.Name, prefix) {
+ continue
+ }
+ if filterRegexp == nil || method.matches(filterRegexp) {
+ runner.tests = append(runner.tests, method)
+ }
+ }
+ }
+ return runner
+}
+
+// Run all methods in the given suite.
+func (runner *suiteRunner) run() *Result {
+ if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {
+ runner.tracker.start()
+ if runner.checkFixtureArgs() {
+ c := runner.runFixture(runner.setUpSuite, "", nil)
+ if c == nil || c.status == succeededSt {
+ for i := 0; i != len(runner.tests); i++ {
+ c := runner.runTest(runner.tests[i])
+ if c.status == fixturePanickedSt {
+ runner.skipTests(missedSt, runner.tests[i+1:])
+ break
+ }
+ }
+ } else if c != nil && c.status == skippedSt {
+ runner.skipTests(skippedSt, runner.tests)
+ } else {
+ runner.skipTests(missedSt, runner.tests)
+ }
+ runner.runFixture(runner.tearDownSuite, "", nil)
+ } else {
+ runner.skipTests(missedSt, runner.tests)
+ }
+ runner.tracker.waitAndStop()
+ if runner.keepDir {
+ runner.tracker.result.WorkDir = runner.tempDir.path
+ } else {
+ runner.tempDir.removeAll()
+ }
+ }
+ return &runner.tracker.result
+}
+
+// Create a call object with the given suite method, and fork a
+// goroutine with the provided dispatcher for running it.
+func (runner *suiteRunner) forkCall(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+ var logw io.Writer
+ if runner.output.Stream {
+ logw = runner.output
+ }
+ if logb == nil {
+ logb = new(logger)
+ }
+ c := &C{
+ method: method,
+ kind: kind,
+ testName: testName,
+ logb: logb,
+ logw: logw,
+ tempDir: runner.tempDir,
+ done: make(chan *C, 1),
+ timer: timer{benchTime: runner.benchTime},
+ startTime: time.Now(),
+ benchMem: runner.benchMem,
+ }
+ runner.tracker.expectCall(c)
+ go (func() {
+ runner.reportCallStarted(c)
+ defer runner.callDone(c)
+ dispatcher(c)
+ })()
+ return c
+}
+
+// Same as forkCall(), but wait for call to finish before returning.
+func (runner *suiteRunner) runFunc(method *methodType, kind funcKind, testName string, logb *logger, dispatcher func(c *C)) *C {
+ c := runner.forkCall(method, kind, testName, logb, dispatcher)
+ <-c.done
+ return c
+}
+
+// Handle a finished call. If there were any panics, update the call status
+// accordingly. Then, mark the call as done and report to the tracker.
+func (runner *suiteRunner) callDone(c *C) {
+ value := recover()
+ if value != nil {
+ switch v := value.(type) {
+ case *fixturePanic:
+ if v.status == skippedSt {
+ c.status = skippedSt
+ } else {
+ c.logSoftPanic("Fixture has panicked (see related PANIC)")
+ c.status = fixturePanickedSt
+ }
+ default:
+ c.logPanic(1, value)
+ c.status = panickedSt
+ }
+ }
+ if c.mustFail {
+ switch c.status {
+ case failedSt:
+ c.status = succeededSt
+ case succeededSt:
+ c.status = failedSt
+ c.logString("Error: Test succeeded, but was expected to fail")
+ c.logString("Reason: " + c.reason)
+ }
+ }
+
+ runner.reportCallDone(c)
+ c.done <- c
+}
+
+// Runs a fixture call synchronously. The fixture will still be run in a
+// goroutine like all suite methods, but this method will not return
+// while the fixture goroutine is not done, because the fixture must be
+// run in a desired order.
+func (runner *suiteRunner) runFixture(method *methodType, testName string, logb *logger) *C {
+ if method != nil {
+ c := runner.runFunc(method, fixtureKd, testName, logb, func(c *C) {
+ c.ResetTimer()
+ c.StartTimer()
+ defer c.StopTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ })
+ return c
+ }
+ return nil
+}
+
+// Run the fixture method with runFixture(), but panic with a fixturePanic{}
+// in case the fixture method panics. This makes it easier to track the
+// fixture panic together with other call panics within forkTest().
+func (runner *suiteRunner) runFixtureWithPanic(method *methodType, testName string, logb *logger, skipped *bool) *C {
+ if skipped != nil && *skipped {
+ return nil
+ }
+ c := runner.runFixture(method, testName, logb)
+ if c != nil && c.status != succeededSt {
+ if skipped != nil {
+ *skipped = c.status == skippedSt
+ }
+ panic(&fixturePanic{c.status, method})
+ }
+ return c
+}
+
+type fixturePanic struct {
+ status funcStatus
+ method *methodType
+}
+
+// Run the suite test method, together with the test-specific fixture,
+// asynchronously.
+func (runner *suiteRunner) forkTest(method *methodType) *C {
+ testName := method.String()
+ return runner.forkCall(method, testKd, testName, nil, func(c *C) {
+ var skipped bool
+ defer runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, &skipped)
+ defer c.StopTimer()
+ benchN := 1
+ for {
+ runner.runFixtureWithPanic(runner.setUpTest, testName, c.logb, &skipped)
+ mt := c.method.Type()
+ if mt.NumIn() != 1 || mt.In(0) != reflect.TypeOf(c) {
+ // Rather than a plain panic, provide a more helpful message when
+ // the argument type is incorrect.
+ c.status = panickedSt
+ c.logArgPanic(c.method, "*check.C")
+ return
+ }
+ if strings.HasPrefix(c.method.Info.Name, "Test") {
+ c.ResetTimer()
+ c.StartTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ return
+ }
+ if !strings.HasPrefix(c.method.Info.Name, "Benchmark") {
+ panic("unexpected method prefix: " + c.method.Info.Name)
+ }
+
+ runtime.GC()
+ c.N = benchN
+ c.ResetTimer()
+ c.StartTimer()
+ c.method.Call([]reflect.Value{reflect.ValueOf(c)})
+ c.StopTimer()
+ if c.status != succeededSt || c.duration >= c.benchTime || benchN >= 1e9 {
+ return
+ }
+ perOpN := int(1e9)
+ if c.nsPerOp() != 0 {
+ perOpN = int(c.benchTime.Nanoseconds() / c.nsPerOp())
+ }
+
+ // Logic taken from the stock testing package:
+ // - Run more iterations than we think we'll need for a second (1.5x).
+ // - Don't grow too fast in case we had timing errors previously.
+ // - Be sure to run at least one more than last time.
+ benchN = max(min(perOpN+perOpN/2, 100*benchN), benchN+1)
+ benchN = roundUp(benchN)
+
+ skipped = true // Don't run the deferred one if this panics.
+ runner.runFixtureWithPanic(runner.tearDownTest, testName, nil, nil)
+ skipped = false
+ }
+ })
+}
+
+// Same as forkTest(), but wait for the test to finish before returning.
+func (runner *suiteRunner) runTest(method *methodType) *C {
+ c := runner.forkTest(method)
+ <-c.done
+ return c
+}
+
+// Helper to mark tests as skipped or missed. A bit heavy for what
+// it does, but it enables homogeneous handling of tracking, including
+// nice verbose output.
+func (runner *suiteRunner) skipTests(status funcStatus, methods []*methodType) {
+ for _, method := range methods {
+ runner.runFunc(method, testKd, "", nil, func(c *C) {
+ c.status = status
+ })
+ }
+}
+
+// Verify if the fixture arguments are *check.C. In case of errors,
+// log the error as a panic in the fixture method call, and return false.
+func (runner *suiteRunner) checkFixtureArgs() bool {
+ succeeded := true
+ argType := reflect.TypeOf(&C{})
+ for _, method := range []*methodType{runner.setUpSuite, runner.tearDownSuite, runner.setUpTest, runner.tearDownTest} {
+ if method != nil {
+ mt := method.Type()
+ if mt.NumIn() != 1 || mt.In(0) != argType {
+ succeeded = false
+ runner.runFunc(method, fixtureKd, "", nil, func(c *C) {
+ c.logArgPanic(method, "*check.C")
+ c.status = panickedSt
+ })
+ }
+ }
+ }
+ return succeeded
+}
+
+func (runner *suiteRunner) reportCallStarted(c *C) {
+ runner.output.WriteCallStarted("START", c)
+}
+
+func (runner *suiteRunner) reportCallDone(c *C) {
+ runner.tracker.callDone(c)
+ switch c.status {
+ case succeededSt:
+ if c.mustFail {
+ runner.output.WriteCallSuccess("FAIL EXPECTED", c)
+ } else {
+ runner.output.WriteCallSuccess("PASS", c)
+ }
+ case skippedSt:
+ runner.output.WriteCallSuccess("SKIP", c)
+ case failedSt:
+ runner.output.WriteCallProblem("FAIL", c)
+ case panickedSt:
+ runner.output.WriteCallProblem("PANIC", c)
+ case fixturePanickedSt:
+ // That's a testKd call reporting that its fixture
+ // has panicked. The fixture call which caused the
+ // panic itself was tracked above. We'll report to
+ // aid debugging.
+ runner.output.WriteCallProblem("PANIC", c)
+ case missedSt:
+ runner.output.WriteCallSuccess("MISS", c)
+ }
+}
+
+// -----------------------------------------------------------------------
+// Output writer manages atomic output writing according to settings.
+
+type outputWriter struct {
+ m sync.Mutex
+ writer io.Writer
+ wroteCallProblemLast bool
+ Stream bool
+ Verbose bool
+}
+
+func newOutputWriter(writer io.Writer, stream, verbose bool) *outputWriter {
+ return &outputWriter{writer: writer, Stream: stream, Verbose: verbose}
+}
+
+func (ow *outputWriter) Write(content []byte) (n int, err error) {
+ ow.m.Lock()
+ n, err = ow.writer.Write(content)
+ ow.m.Unlock()
+ return
+}
+
+func (ow *outputWriter) WriteCallStarted(label string, c *C) {
+ if ow.Stream {
+ header := renderCallHeader(label, c, "", "\n")
+ ow.m.Lock()
+ ow.writer.Write([]byte(header))
+ ow.m.Unlock()
+ }
+}
+
+func (ow *outputWriter) WriteCallProblem(label string, c *C) {
+ var prefix string
+ if !ow.Stream {
+ prefix = "\n-----------------------------------" +
+ "-----------------------------------\n"
+ }
+ header := renderCallHeader(label, c, prefix, "\n\n")
+ ow.m.Lock()
+ ow.wroteCallProblemLast = true
+ ow.writer.Write([]byte(header))
+ if !ow.Stream {
+ c.logb.WriteTo(ow.writer)
+ }
+ ow.m.Unlock()
+}
+
+func (ow *outputWriter) WriteCallSuccess(label string, c *C) {
+ if ow.Stream || (ow.Verbose && c.kind == testKd) {
+ // TODO Use a buffer here.
+ var suffix string
+ if c.reason != "" {
+ suffix = " (" + c.reason + ")"
+ }
+ if c.status == succeededSt {
+ suffix += "\t" + c.timerString()
+ }
+ suffix += "\n"
+ if ow.Stream {
+ suffix += "\n"
+ }
+ header := renderCallHeader(label, c, "", suffix)
+ ow.m.Lock()
+ // Resist temptation of using line as prefix above due to race.
+ if !ow.Stream && ow.wroteCallProblemLast {
+ header = "\n-----------------------------------" +
+ "-----------------------------------\n" +
+ header
+ }
+ ow.wroteCallProblemLast = false
+ ow.writer.Write([]byte(header))
+ ow.m.Unlock()
+ }
+}
+
+func renderCallHeader(label string, c *C, prefix, suffix string) string {
+ pc := c.method.PC()
+ return fmt.Sprintf("%s%s: %s: %s%s", prefix, label, niceFuncPath(pc),
+ niceFuncName(pc), suffix)
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check_test.go
new file mode 100644
index 0000000000..2fb8f897c9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/check_test.go
@@ -0,0 +1,207 @@
+// This file contains just a few generic helpers which are used by the
+// other test files.
+
+package check_test
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+)
+
+// We count the number of suites run at least to get a vague hint that the
+// test suite is behaving as it should. Otherwise a bug introduced at the
+// very core of the system could go unperceived.
+const suitesRunExpected = 8
+
+var suitesRun int = 0
+
+func Test(t *testing.T) {
+ check.TestingT(t)
+ if suitesRun != suitesRunExpected && flag.Lookup("check.f").Value.String() == "" {
+ critical(fmt.Sprintf("Expected %d suites to run rather than %d",
+ suitesRunExpected, suitesRun))
+ }
+}
+
+// -----------------------------------------------------------------------
+// Helper functions.
+
+// Break down badly. This is used in test cases which can't yet assume
+// that the fundamental bits are working.
+func critical(error string) {
+ fmt.Fprintln(os.Stderr, "CRITICAL: "+error)
+ os.Exit(1)
+}
+
+// Return the file line where it's called.
+func getMyLine() int {
+ if _, _, line, ok := runtime.Caller(1); ok {
+ return line
+ }
+ return -1
+}
+
+// -----------------------------------------------------------------------
+// Helper type implementing a basic io.Writer for testing output.
+
+// Type implementing the io.Writer interface for analyzing output.
+type String struct {
+ value string
+}
+
+// The only function required by the io.Writer interface. Will append
+// written data to the String.value string.
+func (s *String) Write(p []byte) (n int, err error) {
+ s.value += string(p)
+ return len(p), nil
+}
+
+// Trivial wrapper to test errors happening on a different file
+// than the test itself.
+func checkEqualWrapper(c *check.C, obtained, expected interface{}) (result bool, line int) {
+ return c.Check(obtained, check.Equals, expected), getMyLine()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic fail behavior.
+
+type FailHelper struct {
+ testLine int
+}
+
+func (s *FailHelper) TestLogAndFail(c *check.C) {
+ s.testLine = getMyLine() - 1
+ c.Log("Expected failure!")
+ c.Fail()
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing basic success behavior.
+
+type SuccessHelper struct{}
+
+func (s *SuccessHelper) TestLogAndSucceed(c *check.C) {
+ c.Log("Expected success!")
+}
+
+// -----------------------------------------------------------------------
+// Helper suite for testing ordering and behavior of fixture.
+
+type FixtureHelper struct {
+ calls []string
+ panicOn string
+ skip bool
+ skipOnN int
+ sleepOn string
+ sleep time.Duration
+ bytes int64
+}
+
+func (s *FixtureHelper) trace(name string, c *check.C) {
+ s.calls = append(s.calls, name)
+ if name == s.panicOn {
+ panic(name)
+ }
+ if s.sleep > 0 && s.sleepOn == name {
+ time.Sleep(s.sleep)
+ }
+ if s.skip && s.skipOnN == len(s.calls)-1 {
+ c.Skip("skipOnN == n")
+ }
+}
+
+func (s *FixtureHelper) SetUpSuite(c *check.C) {
+ s.trace("SetUpSuite", c)
+}
+
+func (s *FixtureHelper) TearDownSuite(c *check.C) {
+ s.trace("TearDownSuite", c)
+}
+
+func (s *FixtureHelper) SetUpTest(c *check.C) {
+ s.trace("SetUpTest", c)
+}
+
+func (s *FixtureHelper) TearDownTest(c *check.C) {
+ s.trace("TearDownTest", c)
+}
+
+func (s *FixtureHelper) Test1(c *check.C) {
+ s.trace("Test1", c)
+}
+
+func (s *FixtureHelper) Test2(c *check.C) {
+ s.trace("Test2", c)
+}
+
+func (s *FixtureHelper) Benchmark1(c *check.C) {
+ s.trace("Benchmark1", c)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ }
+}
+
+func (s *FixtureHelper) Benchmark2(c *check.C) {
+ s.trace("Benchmark2", c)
+ c.SetBytes(1024)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ }
+}
+
+func (s *FixtureHelper) Benchmark3(c *check.C) {
+ var x []int64
+ s.trace("Benchmark3", c)
+ for i := 0; i < c.N; i++ {
+ time.Sleep(s.sleep)
+ x = make([]int64, 5)
+ _ = x
+ }
+}
+
+// -----------------------------------------------------------------------
+// Helper which checks the state of the test and ensures that it matches
+// the given expectations. Depends on c.Errorf() working, so shouldn't
+// be used to test this one function.
+
+type expectedState struct {
+ name string
+ result interface{}
+ failed bool
+ log string
+}
+
+// Verify the state of the test. Note that since this also verifies if
+// the test is supposed to be in a failed state, no other checks should
+// be done in addition to what is being tested.
+func checkState(c *check.C, result interface{}, expected *expectedState) {
+ failed := c.Failed()
+ c.Succeed()
+ log := c.GetTestLog()
+ matched, matchError := regexp.MatchString("^"+expected.log+"$", log)
+ if matchError != nil {
+ c.Errorf("Error in matching expression used in testing %s",
+ expected.name)
+ } else if !matched {
+ c.Errorf("%s logged:\n----------\n%s----------\n\nExpected:\n----------\n%s\n----------",
+ expected.name, log, expected.log)
+ }
+ if result != expected.result {
+ c.Errorf("%s returned %#v rather than %#v",
+ expected.name, result, expected.result)
+ }
+ if failed != expected.failed {
+ if failed {
+ c.Errorf("%s has failed when it shouldn't", expected.name)
+ } else {
+ c.Errorf("%s has not failed when it should", expected.name)
+ }
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers.go
new file mode 100644
index 0000000000..bac338729c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers.go
@@ -0,0 +1,458 @@
+package check
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+// -----------------------------------------------------------------------
+// CommentInterface and Commentf helper, to attach extra information to checks.
+
+type comment struct {
+ format string
+ args []interface{}
+}
+
+// Commentf returns an infomational value to use with Assert or Check calls.
+// If the checker test fails, the provided arguments will be passed to
+// fmt.Sprintf, and will be presented next to the logged failure.
+//
+// For example:
+//
+// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
+//
+// Note that if the comment is constant, a better option is to
+// simply use a normal comment right above or next to the line, as
+// it will also get printed with any errors:
+//
+// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
+//
+func Commentf(format string, args ...interface{}) CommentInterface {
+ return &comment{format, args}
+}
+
+// CommentInterface must be implemented by types that attach extra
+// information to failed checks. See the Commentf function for details.
+type CommentInterface interface {
+ CheckCommentString() string
+}
+
+func (c *comment) CheckCommentString() string {
+ return fmt.Sprintf(c.format, c.args...)
+}
+
+// -----------------------------------------------------------------------
+// The Checker interface.
+
+// The Checker interface must be provided by checkers used with
+// the Assert and Check verification methods.
+type Checker interface {
+ Info() *CheckerInfo
+ Check(params []interface{}, names []string) (result bool, error string)
+}
+
+// See the Checker interface.
+type CheckerInfo struct {
+ Name string
+ Params []string
+}
+
+func (info *CheckerInfo) Info() *CheckerInfo {
+ return info
+}
+
+// -----------------------------------------------------------------------
+// Not checker logic inverter.
+
+// The Not checker inverts the logic of the provided checker. The
+// resulting checker will succeed where the original one failed, and
+// vice-versa.
+//
+// For example:
+//
+// c.Assert(a, Not(Equals), b)
+//
+func Not(checker Checker) Checker {
+ return ¬Checker{checker}
+}
+
+type notChecker struct {
+ sub Checker
+}
+
+func (checker *notChecker) Info() *CheckerInfo {
+ info := *checker.sub.Info()
+ info.Name = "Not(" + info.Name + ")"
+ return &info
+}
+
+func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ result, error = checker.sub.Check(params, names)
+ result = !result
+ return
+}
+
+// -----------------------------------------------------------------------
+// IsNil checker.
+
+type isNilChecker struct {
+ *CheckerInfo
+}
+
+// The IsNil checker tests whether the obtained value is nil.
+//
+// For example:
+//
+// c.Assert(err, IsNil)
+//
+var IsNil Checker = &isNilChecker{
+ &CheckerInfo{Name: "IsNil", Params: []string{"value"}},
+}
+
+func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return isNil(params[0]), ""
+}
+
+func isNil(obtained interface{}) (result bool) {
+ if obtained == nil {
+ result = true
+ } else {
+ switch v := reflect.ValueOf(obtained); v.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ }
+ return
+}
+
+// -----------------------------------------------------------------------
+// NotNil checker. Alias for Not(IsNil), since it's so common.
+
+type notNilChecker struct {
+ *CheckerInfo
+}
+
+// The NotNil checker verifies that the obtained value is not nil.
+//
+// For example:
+//
+// c.Assert(iface, NotNil)
+//
+// This is an alias for Not(IsNil), made available since it's a
+// fairly common check.
+//
+var NotNil Checker = ¬NilChecker{
+ &CheckerInfo{Name: "NotNil", Params: []string{"value"}},
+}
+
+func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return !isNil(params[0]), ""
+}
+
+// -----------------------------------------------------------------------
+// Equals checker.
+
+type equalsChecker struct {
+ *CheckerInfo
+}
+
+// The Equals checker verifies that the obtained value is equal to
+// the expected value, according to usual Go semantics for ==.
+//
+// For example:
+//
+// c.Assert(value, Equals, 42)
+//
+var Equals Checker = &equalsChecker{
+ &CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ defer func() {
+ if v := recover(); v != nil {
+ result = false
+ error = fmt.Sprint(v)
+ }
+ }()
+ return params[0] == params[1], ""
+}
+
+// -----------------------------------------------------------------------
+// DeepEquals checker.
+
+type deepEqualsChecker struct {
+ *CheckerInfo
+}
+
+// The DeepEquals checker verifies that the obtained value is deep-equal to
+// the expected value. The check will work correctly even when facing
+// slices, interfaces, and values of different types (which always fail
+// the test).
+//
+// For example:
+//
+// c.Assert(value, DeepEquals, 42)
+// c.Assert(array, DeepEquals, []string{"hi", "there"})
+//
+var DeepEquals Checker = &deepEqualsChecker{
+ &CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
+}
+
+func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return reflect.DeepEqual(params[0], params[1]), ""
+}
+
+// -----------------------------------------------------------------------
+// HasLen checker.
+
+type hasLenChecker struct {
+ *CheckerInfo
+}
+
+// The HasLen checker verifies that the obtained value has the
+// provided length. In many cases this is superior to using Equals
+// in conjuction with the len function because in case the check
+// fails the value itself will be printed, instead of its length,
+// providing more details for figuring the problem.
+//
+// For example:
+//
+// c.Assert(list, HasLen, 5)
+//
+var HasLen Checker = &hasLenChecker{
+ &CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
+}
+
+func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ n, ok := params[1].(int)
+ if !ok {
+ return false, "n must be an int"
+ }
+ value := reflect.ValueOf(params[0])
+ switch value.Kind() {
+ case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
+ default:
+ return false, "obtained value type has no length"
+ }
+ return value.Len() == n, ""
+}
+
+// -----------------------------------------------------------------------
+// ErrorMatches checker.
+
+type errorMatchesChecker struct {
+ *CheckerInfo
+}
+
+// The ErrorMatches checker verifies that the error value
+// is non nil and matches the regular expression provided.
+//
+// For example:
+//
+// c.Assert(err, ErrorMatches, "perm.*denied")
+//
+var ErrorMatches Checker = errorMatchesChecker{
+ &CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
+}
+
+func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
+ if params[0] == nil {
+ return false, "Error value is nil"
+ }
+ err, ok := params[0].(error)
+ if !ok {
+ return false, "Value is not an error"
+ }
+ params[0] = err.Error()
+ names[0] = "error"
+ return matches(params[0], params[1])
+}
+
+// -----------------------------------------------------------------------
+// Matches checker.
+
+type matchesChecker struct {
+ *CheckerInfo
+}
+
+// The Matches checker verifies that the string provided as the obtained
+// value (or the string resulting from obtained.String()) matches the
+// regular expression provided.
+//
+// For example:
+//
+// c.Assert(err, Matches, "perm.*denied")
+//
+var Matches Checker = &matchesChecker{
+ &CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
+}
+
+func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ return matches(params[0], params[1])
+}
+
+func matches(value, regex interface{}) (result bool, error string) {
+ reStr, ok := regex.(string)
+ if !ok {
+ return false, "Regex must be a string"
+ }
+ valueStr, valueIsStr := value.(string)
+ if !valueIsStr {
+ if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
+ valueStr, valueIsStr = valueWithStr.String(), true
+ }
+ }
+ if valueIsStr {
+ matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
+ if err != nil {
+ return false, "Can't compile regex: " + err.Error()
+ }
+ return matches, ""
+ }
+ return false, "Obtained value is not a string and has no .String()"
+}
+
+// -----------------------------------------------------------------------
+// Panics checker.
+
+type panicsChecker struct {
+ *CheckerInfo
+}
+
+// The Panics checker verifies that calling the provided zero-argument
+// function will cause a panic which is deep-equal to the provided value.
+//
+// For example:
+//
+// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
+//
+//
+var Panics Checker = &panicsChecker{
+ &CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ f := reflect.ValueOf(params[0])
+ if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+ return false, "Function must take zero arguments"
+ }
+ defer func() {
+ // If the function has not panicked, then don't do the check.
+ if error != "" {
+ return
+ }
+ params[0] = recover()
+ names[0] = "panic"
+ result = reflect.DeepEqual(params[0], params[1])
+ }()
+ f.Call(nil)
+ return false, "Function has not panicked"
+}
+
+type panicMatchesChecker struct {
+ *CheckerInfo
+}
+
+// The PanicMatches checker verifies that calling the provided zero-argument
+// function will cause a panic with an error value matching
+// the regular expression provided.
+//
+// For example:
+//
+// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
+//
+//
+var PanicMatches Checker = &panicMatchesChecker{
+ &CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
+}
+
+func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
+ f := reflect.ValueOf(params[0])
+ if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
+ return false, "Function must take zero arguments"
+ }
+ defer func() {
+ // If the function has not panicked, then don't do the check.
+ if errmsg != "" {
+ return
+ }
+ obtained := recover()
+ names[0] = "panic"
+ if e, ok := obtained.(error); ok {
+ params[0] = e.Error()
+ } else if _, ok := obtained.(string); ok {
+ params[0] = obtained
+ } else {
+ errmsg = "Panic value is not a string or an error"
+ return
+ }
+ result, errmsg = matches(params[0], params[1])
+ }()
+ f.Call(nil)
+ return false, "Function has not panicked"
+}
+
+// -----------------------------------------------------------------------
+// FitsTypeOf checker.
+
+type fitsTypeChecker struct {
+ *CheckerInfo
+}
+
+// The FitsTypeOf checker verifies that the obtained value is
+// assignable to a variable with the same type as the provided
+// sample value.
+//
+// For example:
+//
+// c.Assert(value, FitsTypeOf, int64(0))
+// c.Assert(value, FitsTypeOf, os.Error(nil))
+//
+var FitsTypeOf Checker = &fitsTypeChecker{
+ &CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
+}
+
+func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ obtained := reflect.ValueOf(params[0])
+ sample := reflect.ValueOf(params[1])
+ if !obtained.IsValid() {
+ return false, ""
+ }
+ if !sample.IsValid() {
+ return false, "Invalid sample value"
+ }
+ return obtained.Type().AssignableTo(sample.Type()), ""
+}
+
+// -----------------------------------------------------------------------
+// Implements checker.
+
+type implementsChecker struct {
+ *CheckerInfo
+}
+
+// The Implements checker verifies that the obtained value
+// implements the interface specified via a pointer to an interface
+// variable.
+//
+// For example:
+//
+// var e os.Error
+// c.Assert(err, Implements, &e)
+//
+var Implements Checker = &implementsChecker{
+ &CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
+}
+
+func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
+ obtained := reflect.ValueOf(params[0])
+ ifaceptr := reflect.ValueOf(params[1])
+ if !obtained.IsValid() {
+ return false, ""
+ }
+ if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
+ return false, "ifaceptr should be a pointer to an interface variable"
+ }
+ return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers_test.go
new file mode 100644
index 0000000000..2da898af29
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/checkers_test.go
@@ -0,0 +1,272 @@
+package check_test
+
+import (
+ "errors"
+ "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "reflect"
+ "runtime"
+)
+
+type CheckersS struct{}
+
+var _ = check.Suite(&CheckersS{})
+
+func testInfo(c *check.C, checker check.Checker, name string, paramNames []string) {
+ info := checker.Info()
+ if info.Name != name {
+ c.Fatalf("Got name %s, expected %s", info.Name, name)
+ }
+ if !reflect.DeepEqual(info.Params, paramNames) {
+ c.Fatalf("Got param names %#v, expected %#v", info.Params, paramNames)
+ }
+}
+
+func testCheck(c *check.C, checker check.Checker, result bool, error string, params ...interface{}) ([]interface{}, []string) {
+ info := checker.Info()
+ if len(params) != len(info.Params) {
+ c.Fatalf("unexpected param count in test; expected %d got %d", len(info.Params), len(params))
+ }
+ names := append([]string{}, info.Params...)
+ result_, error_ := checker.Check(params, names)
+ if result_ != result || error_ != error {
+ c.Fatalf("%s.Check(%#v) returned (%#v, %#v) rather than (%#v, %#v)",
+ info.Name, params, result_, error_, result, error)
+ }
+ return params, names
+}
+
+func (s *CheckersS) TestComment(c *check.C) {
+ bug := check.Commentf("a %d bc", 42)
+ comment := bug.CheckCommentString()
+ if comment != "a 42 bc" {
+ c.Fatalf("Commentf returned %#v", comment)
+ }
+}
+
+func (s *CheckersS) TestIsNil(c *check.C) {
+ testInfo(c, check.IsNil, "IsNil", []string{"value"})
+
+ testCheck(c, check.IsNil, true, "", nil)
+ testCheck(c, check.IsNil, false, "", "a")
+
+ testCheck(c, check.IsNil, true, "", (chan int)(nil))
+ testCheck(c, check.IsNil, false, "", make(chan int))
+ testCheck(c, check.IsNil, true, "", (error)(nil))
+ testCheck(c, check.IsNil, false, "", errors.New(""))
+ testCheck(c, check.IsNil, true, "", ([]int)(nil))
+ testCheck(c, check.IsNil, false, "", make([]int, 1))
+ testCheck(c, check.IsNil, false, "", int(0))
+}
+
+func (s *CheckersS) TestNotNil(c *check.C) {
+ testInfo(c, check.NotNil, "NotNil", []string{"value"})
+
+ testCheck(c, check.NotNil, false, "", nil)
+ testCheck(c, check.NotNil, true, "", "a")
+
+ testCheck(c, check.NotNil, false, "", (chan int)(nil))
+ testCheck(c, check.NotNil, true, "", make(chan int))
+ testCheck(c, check.NotNil, false, "", (error)(nil))
+ testCheck(c, check.NotNil, true, "", errors.New(""))
+ testCheck(c, check.NotNil, false, "", ([]int)(nil))
+ testCheck(c, check.NotNil, true, "", make([]int, 1))
+}
+
+func (s *CheckersS) TestNot(c *check.C) {
+ testInfo(c, check.Not(check.IsNil), "Not(IsNil)", []string{"value"})
+
+ testCheck(c, check.Not(check.IsNil), false, "", nil)
+ testCheck(c, check.Not(check.IsNil), true, "", "a")
+}
+
+type simpleStruct struct {
+ i int
+}
+
+func (s *CheckersS) TestEquals(c *check.C) {
+ testInfo(c, check.Equals, "Equals", []string{"obtained", "expected"})
+
+ // The simplest.
+ testCheck(c, check.Equals, true, "", 42, 42)
+ testCheck(c, check.Equals, false, "", 42, 43)
+
+ // Different native types.
+ testCheck(c, check.Equals, false, "", int32(42), int64(42))
+
+ // With nil.
+ testCheck(c, check.Equals, false, "", 42, nil)
+
+ // Slices
+ testCheck(c, check.Equals, false, "runtime error: comparing uncomparable type []uint8", []byte{1, 2}, []byte{1, 2})
+
+ // Struct values
+ testCheck(c, check.Equals, true, "", simpleStruct{1}, simpleStruct{1})
+ testCheck(c, check.Equals, false, "", simpleStruct{1}, simpleStruct{2})
+
+ // Struct pointers
+ testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{1})
+ testCheck(c, check.Equals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestDeepEquals(c *check.C) {
+ testInfo(c, check.DeepEquals, "DeepEquals", []string{"obtained", "expected"})
+
+ // The simplest.
+ testCheck(c, check.DeepEquals, true, "", 42, 42)
+ testCheck(c, check.DeepEquals, false, "", 42, 43)
+
+ // Different native types.
+ testCheck(c, check.DeepEquals, false, "", int32(42), int64(42))
+
+ // With nil.
+ testCheck(c, check.DeepEquals, false, "", 42, nil)
+
+ // Slices
+ testCheck(c, check.DeepEquals, true, "", []byte{1, 2}, []byte{1, 2})
+ testCheck(c, check.DeepEquals, false, "", []byte{1, 2}, []byte{1, 3})
+
+ // Struct values
+ testCheck(c, check.DeepEquals, true, "", simpleStruct{1}, simpleStruct{1})
+ testCheck(c, check.DeepEquals, false, "", simpleStruct{1}, simpleStruct{2})
+
+ // Struct pointers
+ testCheck(c, check.DeepEquals, true, "", &simpleStruct{1}, &simpleStruct{1})
+ testCheck(c, check.DeepEquals, false, "", &simpleStruct{1}, &simpleStruct{2})
+}
+
+func (s *CheckersS) TestHasLen(c *check.C) {
+ testInfo(c, check.HasLen, "HasLen", []string{"obtained", "n"})
+
+ testCheck(c, check.HasLen, true, "", "abcd", 4)
+ testCheck(c, check.HasLen, true, "", []int{1, 2}, 2)
+ testCheck(c, check.HasLen, false, "", []int{1, 2}, 3)
+
+ testCheck(c, check.HasLen, false, "n must be an int", []int{1, 2}, "2")
+ testCheck(c, check.HasLen, false, "obtained value type has no length", nil, 2)
+}
+
+func (s *CheckersS) TestErrorMatches(c *check.C) {
+ testInfo(c, check.ErrorMatches, "ErrorMatches", []string{"value", "regex"})
+
+ testCheck(c, check.ErrorMatches, false, "Error value is nil", nil, "some error")
+ testCheck(c, check.ErrorMatches, false, "Value is not an error", 1, "some error")
+ testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "some error")
+ testCheck(c, check.ErrorMatches, true, "", errors.New("some error"), "so.*or")
+
+ // Verify params mutation
+ params, names := testCheck(c, check.ErrorMatches, false, "", errors.New("some error"), "other error")
+ c.Assert(params[0], check.Equals, "some error")
+ c.Assert(names[0], check.Equals, "error")
+}
+
+func (s *CheckersS) TestMatches(c *check.C) {
+ testInfo(c, check.Matches, "Matches", []string{"value", "regex"})
+
+ // Simple matching
+ testCheck(c, check.Matches, true, "", "abc", "abc")
+ testCheck(c, check.Matches, true, "", "abc", "a.c")
+
+ // Must match fully
+ testCheck(c, check.Matches, false, "", "abc", "ab")
+ testCheck(c, check.Matches, false, "", "abc", "bc")
+
+ // String()-enabled values accepted
+ testCheck(c, check.Matches, true, "", reflect.ValueOf("abc"), "a.c")
+ testCheck(c, check.Matches, false, "", reflect.ValueOf("abc"), "a.d")
+
+ // Some error conditions.
+ testCheck(c, check.Matches, false, "Obtained value is not a string and has no .String()", 1, "a.c")
+ testCheck(c, check.Matches, false, "Can't compile regex: error parsing regexp: missing closing ]: `[c$`", "abc", "a[c")
+}
+
+func (s *CheckersS) TestPanics(c *check.C) {
+ testInfo(c, check.Panics, "Panics", []string{"function", "expected"})
+
+ // Some errors.
+ testCheck(c, check.Panics, false, "Function has not panicked", func() bool { return false }, "BOOM")
+ testCheck(c, check.Panics, false, "Function must take zero arguments", 1, "BOOM")
+
+ // Plain strings.
+ testCheck(c, check.Panics, true, "", func() { panic("BOOM") }, "BOOM")
+ testCheck(c, check.Panics, false, "", func() { panic("KABOOM") }, "BOOM")
+ testCheck(c, check.Panics, true, "", func() bool { panic("BOOM") }, "BOOM")
+
+ // Error values.
+ testCheck(c, check.Panics, true, "", func() { panic(errors.New("BOOM")) }, errors.New("BOOM"))
+ testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+
+ type deep struct{ i int }
+ // Deep value
+ testCheck(c, check.Panics, true, "", func() { panic(&deep{99}) }, &deep{99})
+
+ // Verify params/names mutation
+ params, names := testCheck(c, check.Panics, false, "", func() { panic(errors.New("KABOOM")) }, errors.New("BOOM"))
+ c.Assert(params[0], check.ErrorMatches, "KABOOM")
+ c.Assert(names[0], check.Equals, "panic")
+
+ // Verify a nil panic
+ testCheck(c, check.Panics, true, "", func() { panic(nil) }, nil)
+ testCheck(c, check.Panics, false, "", func() { panic(nil) }, "NOPE")
+}
+
+func (s *CheckersS) TestPanicMatches(c *check.C) {
+ testInfo(c, check.PanicMatches, "PanicMatches", []string{"function", "expected"})
+
+ // Error matching.
+ testCheck(c, check.PanicMatches, true, "", func() { panic(errors.New("BOOM")) }, "BO.M")
+ testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BO.M")
+
+ // Some errors.
+ testCheck(c, check.PanicMatches, false, "Function has not panicked", func() bool { return false }, "BOOM")
+ testCheck(c, check.PanicMatches, false, "Function must take zero arguments", 1, "BOOM")
+
+ // Plain strings.
+ testCheck(c, check.PanicMatches, true, "", func() { panic("BOOM") }, "BO.M")
+ testCheck(c, check.PanicMatches, false, "", func() { panic("KABOOM") }, "BOOM")
+ testCheck(c, check.PanicMatches, true, "", func() bool { panic("BOOM") }, "BO.M")
+
+ // Verify params/names mutation
+ params, names := testCheck(c, check.PanicMatches, false, "", func() { panic(errors.New("KABOOM")) }, "BOOM")
+ c.Assert(params[0], check.Equals, "KABOOM")
+ c.Assert(names[0], check.Equals, "panic")
+
+ // Verify a nil panic
+ testCheck(c, check.PanicMatches, false, "Panic value is not a string or an error", func() { panic(nil) }, "")
+}
+
+func (s *CheckersS) TestFitsTypeOf(c *check.C) {
+ testInfo(c, check.FitsTypeOf, "FitsTypeOf", []string{"obtained", "sample"})
+
+ // Basic types
+ testCheck(c, check.FitsTypeOf, true, "", 1, 0)
+ testCheck(c, check.FitsTypeOf, false, "", 1, int64(0))
+
+ // Aliases
+ testCheck(c, check.FitsTypeOf, false, "", 1, errors.New(""))
+ testCheck(c, check.FitsTypeOf, false, "", "error", errors.New(""))
+ testCheck(c, check.FitsTypeOf, true, "", errors.New("error"), errors.New(""))
+
+ // Structures
+ testCheck(c, check.FitsTypeOf, false, "", 1, simpleStruct{})
+ testCheck(c, check.FitsTypeOf, false, "", simpleStruct{42}, &simpleStruct{})
+ testCheck(c, check.FitsTypeOf, true, "", simpleStruct{42}, simpleStruct{})
+ testCheck(c, check.FitsTypeOf, true, "", &simpleStruct{42}, &simpleStruct{})
+
+ // Some bad values
+ testCheck(c, check.FitsTypeOf, false, "Invalid sample value", 1, interface{}(nil))
+ testCheck(c, check.FitsTypeOf, false, "", interface{}(nil), 0)
+}
+
+func (s *CheckersS) TestImplements(c *check.C) {
+ testInfo(c, check.Implements, "Implements", []string{"obtained", "ifaceptr"})
+
+ var e error
+ var re runtime.Error
+ testCheck(c, check.Implements, true, "", errors.New(""), &e)
+ testCheck(c, check.Implements, false, "", errors.New(""), &re)
+
+ // Some bad values
+ testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, errors.New(""))
+ testCheck(c, check.Implements, false, "ifaceptr should be a pointer to an interface variable", 0, interface{}(nil))
+ testCheck(c, check.Implements, false, "", interface{}(nil), &e)
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/export_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/export_test.go
new file mode 100644
index 0000000000..0e6cfe0f22
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/export_test.go
@@ -0,0 +1,9 @@
+package check
+
+func PrintLine(filename string, line int) (string, error) {
+ return printLine(filename, line)
+}
+
+func Indent(s, with string) string {
+ return indent(s, with)
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/fixture_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/fixture_test.go
new file mode 100644
index 0000000000..eaff23a1b9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/fixture_test.go
@@ -0,0 +1,484 @@
+// Tests for the behavior of the test fixture system.
+
+package check_test
+
+import (
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+)
+
+// -----------------------------------------------------------------------
+// Fixture test suite.
+
+type FixtureS struct{}
+
+var fixtureS = Suite(&FixtureS{})
+
+func (s *FixtureS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Basic fixture ordering verification.
+
+func (s *FixtureS) TestOrder(c *C) {
+ helper := FixtureHelper{}
+ Run(&helper, nil)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+// -----------------------------------------------------------------------
+// Check the behavior when panics occur within tests and fixtures.
+
+func (s *FixtureS) TestPanicOnTest(c *C) {
+ helper := FixtureHelper{panicOn: "Test1"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: FixtureHelper.Test1\n\n" +
+ "\\.\\.\\. Panic: Test1 \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.Test1\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpTest(c *C) {
+ helper := FixtureHelper{panicOn: "SetUpTest"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 4)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: SetUpTest \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.SetUpTest\n" +
+ "(.|\n)*" +
+ "\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: Fixture has panicked " +
+ "\\(see related PANIC\\)\n$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownTest(c *C) {
+ helper := FixtureHelper{panicOn: "TearDownTest"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.TearDownTest\n\n" +
+ "\\.\\.\\. Panic: TearDownTest \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.TearDownTest\n" +
+ "(.|\n)*" +
+ "\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: Fixture has panicked " +
+ "\\(see related PANIC\\)\n$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnSetUpSuite(c *C) {
+ helper := FixtureHelper{panicOn: "SetUpSuite"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 2)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: SetUpSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.SetUpSuite\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnTearDownSuite(c *C) {
+ helper := FixtureHelper{panicOn: "TearDownSuite"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+
+ expected := "^\n-+\n" +
+ "PANIC: check_test\\.go:[0-9]+: " +
+ "FixtureHelper.TearDownSuite\n\n" +
+ "\\.\\.\\. Panic: TearDownSuite \\(PC=[xA-F0-9]+\\)\n\n" +
+ ".+:[0-9]+\n" +
+ " in (go)?panic\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.trace\n" +
+ ".*check_test.go:[0-9]+\n" +
+ " in FixtureHelper.TearDownSuite\n" +
+ "(.|\n)*$"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// A wrong argument on a test or fixture will produce a nice error.
+
+func (s *FixtureS) TestPanicOnWrongTestArg(c *C) {
+ helper := WrongTestArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "SetUpTest")
+ c.Check(helper.calls[4], Equals, "Test2")
+ c.Check(helper.calls[5], Equals, "TearDownTest")
+ c.Check(helper.calls[6], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 7)
+
+ expected := "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongTestArgHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: WrongTestArgHelper\\.Test1 argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArg(c *C) {
+ helper := WrongSetUpTestArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpTestArgHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpTestArgHelper\\.SetUpTest argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArg(c *C) {
+ helper := WrongSetUpSuiteArgHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpSuiteArgHelper\\.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpSuiteArgHelper\\.SetUpSuite argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Nice errors also when tests or fixture have wrong arg count.
+
+func (s *FixtureS) TestPanicOnWrongTestArgCount(c *C) {
+ helper := WrongTestArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "TearDownTest")
+ c.Check(helper.calls[3], Equals, "SetUpTest")
+ c.Check(helper.calls[4], Equals, "Test2")
+ c.Check(helper.calls[5], Equals, "TearDownTest")
+ c.Check(helper.calls[6], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 7)
+
+ expected := "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongTestArgCountHelper\\.Test1\n\n" +
+ "\\.\\.\\. Panic: WrongTestArgCountHelper\\.Test1 argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpTestArgCount(c *C) {
+ helper := WrongSetUpTestArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpTestArgCountHelper\\.SetUpTest\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpTestArgCountHelper\\.SetUpTest argument " +
+ "should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+func (s *FixtureS) TestPanicOnWrongSetUpSuiteArgCount(c *C) {
+ helper := WrongSetUpSuiteArgCountHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(len(helper.calls), Equals, 0)
+
+ expected :=
+ "^\n-+\n" +
+ "PANIC: fixture_test\\.go:[0-9]+: " +
+ "WrongSetUpSuiteArgCountHelper\\.SetUpSuite\n\n" +
+ "\\.\\.\\. Panic: WrongSetUpSuiteArgCountHelper" +
+ "\\.SetUpSuite argument should be \\*check\\.C\n"
+
+ c.Check(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Helper test suites with wrong function arguments.
+
+type WrongTestArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongTestArgHelper) Test1(t int) {
+}
+
+type WrongSetUpTestArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpTestArgHelper) SetUpTest(t int) {
+}
+
+type WrongSetUpSuiteArgHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgHelper) SetUpSuite(t int) {
+}
+
+type WrongTestArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongTestArgCountHelper) Test1(c *C, i int) {
+}
+
+type WrongSetUpTestArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpTestArgCountHelper) SetUpTest(c *C, i int) {
+}
+
+type WrongSetUpSuiteArgCountHelper struct {
+ FixtureHelper
+}
+
+func (s *WrongSetUpSuiteArgCountHelper) SetUpSuite(c *C, i int) {
+}
+
+// -----------------------------------------------------------------------
+// Ensure fixture doesn't run without tests.
+
+type NoTestsHelper struct {
+ hasRun bool
+}
+
+func (s *NoTestsHelper) SetUpSuite(c *C) {
+ s.hasRun = true
+}
+
+func (s *NoTestsHelper) TearDownSuite(c *C) {
+ s.hasRun = true
+}
+
+func (s *FixtureS) TestFixtureDoesntRunWithoutTests(c *C) {
+ helper := NoTestsHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Check(helper.hasRun, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that checks and assertions work correctly inside the fixture.
+
+type FixtureCheckHelper struct {
+ fail string
+ completed bool
+}
+
+func (s *FixtureCheckHelper) SetUpSuite(c *C) {
+ switch s.fail {
+ case "SetUpSuiteAssert":
+ c.Assert(false, Equals, true)
+ case "SetUpSuiteCheck":
+ c.Check(false, Equals, true)
+ }
+ s.completed = true
+}
+
+func (s *FixtureCheckHelper) SetUpTest(c *C) {
+ switch s.fail {
+ case "SetUpTestAssert":
+ c.Assert(false, Equals, true)
+ case "SetUpTestCheck":
+ c.Check(false, Equals, true)
+ }
+ s.completed = true
+}
+
+func (s *FixtureCheckHelper) Test(c *C) {
+ // Do nothing.
+}
+
+func (s *FixtureS) TestSetUpSuiteCheck(c *C) {
+ helper := FixtureCheckHelper{fail: "SetUpSuiteCheck"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureCheckHelper\\.SetUpSuite\n\n"+
+ "fixture_test\\.go:[0-9]+:\n"+
+ " c\\.Check\\(false, Equals, true\\)\n"+
+ "\\.+ obtained bool = false\n"+
+ "\\.+ expected bool = true\n\n")
+ c.Assert(helper.completed, Equals, true)
+}
+
+func (s *FixtureS) TestSetUpSuiteAssert(c *C) {
+ helper := FixtureCheckHelper{fail: "SetUpSuiteAssert"}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureCheckHelper\\.SetUpSuite\n\n"+
+ "fixture_test\\.go:[0-9]+:\n"+
+ " c\\.Assert\\(false, Equals, true\\)\n"+
+ "\\.+ obtained bool = false\n"+
+ "\\.+ expected bool = true\n\n")
+ c.Assert(helper.completed, Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Verify that logging within SetUpTest() persists within the test log itself.
+
+type FixtureLogHelper struct {
+ c *C
+}
+
+func (s *FixtureLogHelper) SetUpTest(c *C) {
+ s.c = c
+ c.Log("1")
+}
+
+func (s *FixtureLogHelper) Test(c *C) {
+ c.Log("2")
+ s.c.Log("3")
+ c.Log("4")
+ c.Fail()
+}
+
+func (s *FixtureLogHelper) TearDownTest(c *C) {
+ s.c.Log("5")
+}
+
+func (s *FixtureS) TestFixtureLogging(c *C) {
+ helper := FixtureLogHelper{}
+ output := String{}
+ Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Matches,
+ "\n---+\n"+
+ "FAIL: fixture_test\\.go:[0-9]+: "+
+ "FixtureLogHelper\\.Test\n\n"+
+ "1\n2\n3\n4\n5\n")
+}
+
+// -----------------------------------------------------------------------
+// Skip() within fixture methods.
+
+func (s *FixtureS) TestSkipSuite(c *C) {
+ helper := FixtureHelper{skip: true, skipOnN: 0}
+ output := String{}
+ result := Run(&helper, &RunConf{Output: &output})
+ c.Assert(output.value, Equals, "")
+ c.Assert(helper.calls[0], Equals, "SetUpSuite")
+ c.Assert(helper.calls[1], Equals, "TearDownSuite")
+ c.Assert(len(helper.calls), Equals, 2)
+ c.Assert(result.Skipped, Equals, 2)
+}
+
+func (s *FixtureS) TestSkipTest(c *C) {
+ helper := FixtureHelper{skip: true, skipOnN: 1}
+ output := String{}
+ result := Run(&helper, &RunConf{Output: &output})
+ c.Assert(helper.calls[0], Equals, "SetUpSuite")
+ c.Assert(helper.calls[1], Equals, "SetUpTest")
+ c.Assert(helper.calls[2], Equals, "SetUpTest")
+ c.Assert(helper.calls[3], Equals, "Test2")
+ c.Assert(helper.calls[4], Equals, "TearDownTest")
+ c.Assert(helper.calls[5], Equals, "TearDownSuite")
+ c.Assert(len(helper.calls), Equals, 6)
+ c.Assert(result.Skipped, Equals, 1)
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/foundation_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/foundation_test.go
new file mode 100644
index 0000000000..809ef65e98
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/foundation_test.go
@@ -0,0 +1,335 @@
+// These tests check that the foundations of gocheck are working properly.
+// They already assume that fundamental failing is working already, though,
+// since this was tested in bootstrap_test.go. Even then, some care may
+// still have to be taken when using external functions, since they should
+// of course not rely on functionality tested here.
+
+package check_test
+
+import (
+ "fmt"
+ "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "log"
+ "os"
+ "regexp"
+ "strings"
+)
+
+// -----------------------------------------------------------------------
+// Foundation test suite.
+
+type FoundationS struct{}
+
+var foundationS = check.Suite(&FoundationS{})
+
+func (s *FoundationS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *FoundationS) TestErrorf(c *check.C) {
+ // Do not use checkState() here. It depends on Errorf() working.
+ expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Errorf(\"Error %%v!\", \"message\")\n"+
+ "... Error: Error message!\n\n",
+ getMyLine()+1)
+ c.Errorf("Error %v!", "message")
+ failed := c.Failed()
+ c.Succeed()
+ if log := c.GetTestLog(); log != expectedLog {
+ c.Logf("Errorf() logged %#v rather than %#v", log, expectedLog)
+ c.Fail()
+ }
+ if !failed {
+ c.Logf("Errorf() didn't put the test in a failed state")
+ c.Fail()
+ }
+}
+
+func (s *FoundationS) TestError(c *check.C) {
+ expectedLog := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c\\.Error\\(\"Error \", \"message!\"\\)\n"+
+ "\\.\\.\\. Error: Error message!\n\n",
+ getMyLine()+1)
+ c.Error("Error ", "message!")
+ checkState(c, nil,
+ &expectedState{
+ name: "Error(`Error `, `message!`)",
+ failed: true,
+ log: expectedLog,
+ })
+}
+
+func (s *FoundationS) TestFailNow(c *check.C) {
+ defer (func() {
+ if !c.Failed() {
+ c.Error("FailNow() didn't fail the test")
+ } else {
+ c.Succeed()
+ if c.GetTestLog() != "" {
+ c.Error("Something got logged:\n" + c.GetTestLog())
+ }
+ }
+ })()
+
+ c.FailNow()
+ c.Log("FailNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestSucceedNow(c *check.C) {
+ defer (func() {
+ if c.Failed() {
+ c.Error("SucceedNow() didn't succeed the test")
+ }
+ if c.GetTestLog() != "" {
+ c.Error("Something got logged:\n" + c.GetTestLog())
+ }
+ })()
+
+ c.Fail()
+ c.SucceedNow()
+ c.Log("SucceedNow() didn't stop the test")
+}
+
+func (s *FoundationS) TestFailureHeader(c *check.C) {
+ output := String{}
+ failHelper := FailHelper{}
+ check.Run(&failHelper, &check.RunConf{Output: &output})
+ header := fmt.Sprintf(""+
+ "\n-----------------------------------"+
+ "-----------------------------------\n"+
+ "FAIL: check_test.go:%d: FailHelper.TestLogAndFail\n",
+ failHelper.testLine)
+ if strings.Index(output.value, header) == -1 {
+ c.Errorf(""+
+ "Failure didn't print a proper header.\n"+
+ "... Got:\n%s... Expected something with:\n%s",
+ output.value, header)
+ }
+}
+
+func (s *FoundationS) TestFatal(c *check.C) {
+ var line int
+ defer (func() {
+ if !c.Failed() {
+ c.Error("Fatal() didn't fail the test")
+ } else {
+ c.Succeed()
+ expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Fatal(\"Die \", \"now!\")\n"+
+ "... Error: Die now!\n\n",
+ line)
+ if c.GetTestLog() != expected {
+ c.Error("Incorrect log:", c.GetTestLog())
+ }
+ }
+ })()
+
+ line = getMyLine() + 1
+ c.Fatal("Die ", "now!")
+ c.Log("Fatal() didn't stop the test")
+}
+
+func (s *FoundationS) TestFatalf(c *check.C) {
+ var line int
+ defer (func() {
+ if !c.Failed() {
+ c.Error("Fatalf() didn't fail the test")
+ } else {
+ c.Succeed()
+ expected := fmt.Sprintf("foundation_test.go:%d:\n"+
+ " c.Fatalf(\"Die %%s!\", \"now\")\n"+
+ "... Error: Die now!\n\n",
+ line)
+ if c.GetTestLog() != expected {
+ c.Error("Incorrect log:", c.GetTestLog())
+ }
+ }
+ })()
+
+ line = getMyLine() + 1
+ c.Fatalf("Die %s!", "now")
+ c.Log("Fatalf() didn't stop the test")
+}
+
+func (s *FoundationS) TestCallerLoggingInsideTest(c *check.C) {
+ log := fmt.Sprintf(""+
+ "foundation_test.go:%d:\n"+
+ " result := c.Check\\(10, check.Equals, 20\\)\n"+
+ "\\.\\.\\. obtained int = 10\n"+
+ "\\.\\.\\. expected int = 20\n\n",
+ getMyLine()+1)
+ result := c.Check(10, check.Equals, 20)
+ checkState(c, result,
+ &expectedState{
+ name: "Check(10, Equals, 20)",
+ result: false,
+ failed: true,
+ log: log,
+ })
+}
+
+func (s *FoundationS) TestCallerLoggingInDifferentFile(c *check.C) {
+ result, line := checkEqualWrapper(c, 10, 20)
+ testLine := getMyLine() - 1
+ log := fmt.Sprintf(""+
+ "foundation_test.go:%d:\n"+
+ " result, line := checkEqualWrapper\\(c, 10, 20\\)\n"+
+ "check_test.go:%d:\n"+
+ " return c.Check\\(obtained, check.Equals, expected\\), getMyLine\\(\\)\n"+
+ "\\.\\.\\. obtained int = 10\n"+
+ "\\.\\.\\. expected int = 20\n\n",
+ testLine, line)
+ checkState(c, result,
+ &expectedState{
+ name: "Check(10, Equals, 20)",
+ result: false,
+ failed: true,
+ log: log,
+ })
+}
+
+// -----------------------------------------------------------------------
+// ExpectFailure() inverts the logic of failure.
+
+type ExpectFailureSucceedHelper struct{}
+
+func (s *ExpectFailureSucceedHelper) TestSucceed(c *check.C) {
+ c.ExpectFailure("It booms!")
+ c.Error("Boom!")
+}
+
+type ExpectFailureFailHelper struct{}
+
+func (s *ExpectFailureFailHelper) TestFail(c *check.C) {
+ c.ExpectFailure("Bug #XYZ")
+}
+
+func (s *FoundationS) TestExpectFailureFail(c *check.C) {
+ helper := ExpectFailureFailHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output})
+
+ expected := "" +
+ "^\n-+\n" +
+ "FAIL: foundation_test\\.go:[0-9]+:" +
+ " ExpectFailureFailHelper\\.TestFail\n\n" +
+ "\\.\\.\\. Error: Test succeeded, but was expected to fail\n" +
+ "\\.\\.\\. Reason: Bug #XYZ\n$"
+
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("ExpectFailure() didn't log properly:\n", output.value)
+ }
+
+ c.Assert(result.ExpectedFailures, check.Equals, 0)
+}
+
+func (s *FoundationS) TestExpectFailureSucceed(c *check.C) {
+ helper := ExpectFailureSucceedHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output})
+
+ c.Assert(output.value, check.Equals, "")
+ c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+func (s *FoundationS) TestExpectFailureSucceedVerbose(c *check.C) {
+ helper := ExpectFailureSucceedHelper{}
+ output := String{}
+ result := check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+ expected := "" +
+ "FAIL EXPECTED: foundation_test\\.go:[0-9]+:" +
+ " ExpectFailureSucceedHelper\\.TestSucceed \\(It booms!\\)\t *[.0-9]+s\n"
+
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("ExpectFailure() didn't log properly:\n", output.value)
+ }
+
+ c.Assert(result.ExpectedFailures, check.Equals, 1)
+}
+
+// -----------------------------------------------------------------------
+// Skip() allows stopping a test without positive/negative results.
+
+type SkipTestHelper struct{}
+
+func (s *SkipTestHelper) TestFail(c *check.C) {
+ c.Skip("Wrong platform or whatever")
+ c.Error("Boom!")
+}
+
+func (s *FoundationS) TestSkip(c *check.C) {
+ helper := SkipTestHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+
+ if output.value != "" {
+ c.Error("Skip() logged something:\n", output.value)
+ }
+}
+
+func (s *FoundationS) TestSkipVerbose(c *check.C) {
+ helper := SkipTestHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output, Verbose: true})
+
+ expected := "SKIP: foundation_test\\.go:[0-9]+: SkipTestHelper\\.TestFail" +
+ " \\(Wrong platform or whatever\\)"
+ matched, err := regexp.MatchString(expected, output.value)
+ if err != nil {
+ c.Error("Bad expression: ", expected)
+ } else if !matched {
+ c.Error("Skip() didn't log properly:\n", output.value)
+ }
+}
+
+// -----------------------------------------------------------------------
+// Check minimum *log.Logger interface provided by *check.C.
+
+type minLogger interface {
+ Output(calldepth int, s string) error
+}
+
+func (s *BootstrapS) TestMinLogger(c *check.C) {
+ var logger minLogger
+ logger = log.New(os.Stderr, "", 0)
+ logger = c
+ logger.Output(0, "Hello there")
+ expected := `\[LOG\] [0-9]+:[0-9][0-9]\.[0-9][0-9][0-9] +Hello there\n`
+ output := c.GetTestLog()
+ c.Assert(output, check.Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Ensure that suites with embedded types are working fine, including the
+// the workaround for issue 906.
+
+type EmbeddedInternalS struct {
+ called bool
+}
+
+type EmbeddedS struct {
+ EmbeddedInternalS
+}
+
+var embeddedS = check.Suite(&EmbeddedS{})
+
+func (s *EmbeddedS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+func (s *EmbeddedInternalS) TestMethod(c *check.C) {
+ c.Error("TestMethod() of the embedded type was called!?")
+}
+
+func (s *EmbeddedS) TestMethod(c *check.C) {
+ // http://code.google.com/p/go/issues/detail?id=906
+ c.Check(s.called, check.Equals, false) // Go issue 906 is affecting the runner?
+ s.called = true
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers.go
new file mode 100644
index 0000000000..4b6c26da45
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers.go
@@ -0,0 +1,231 @@
+package check
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// TestName returns the current test name in the form "SuiteName.TestName"
+func (c *C) TestName() string {
+ return c.testName
+}
+
+// -----------------------------------------------------------------------
+// Basic succeeding/failing logic.
+
+// Failed returns whether the currently running test has already failed.
+func (c *C) Failed() bool {
+ return c.status == failedSt
+}
+
+// Fail marks the currently running test as failed.
+//
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) Fail() {
+ c.status = failedSt
+}
+
+// FailNow marks the currently running test as failed and stops running it.
+// Something ought to have been previously logged so the developer can tell
+// what went wrong. The higher level helper functions will fail the test
+// and do the logging properly.
+func (c *C) FailNow() {
+ c.Fail()
+ c.stopNow()
+}
+
+// Succeed marks the currently running test as succeeded, undoing any
+// previous failures.
+func (c *C) Succeed() {
+ c.status = succeededSt
+}
+
+// SucceedNow marks the currently running test as succeeded, undoing any
+// previous failures, and stops running the test.
+func (c *C) SucceedNow() {
+ c.Succeed()
+ c.stopNow()
+}
+
+// ExpectFailure informs that the running test is knowingly broken for
+// the provided reason. If the test does not fail, an error will be reported
+// to raise attention to this fact. This method is useful to temporarily
+// disable tests which cover well known problems until a better time to
+// fix the problem is found, without forgetting about the fact that a
+// failure still exists.
+func (c *C) ExpectFailure(reason string) {
+ if reason == "" {
+ panic("Missing reason why the test is expected to fail")
+ }
+ c.mustFail = true
+ c.reason = reason
+}
+
+// Skip skips the running test for the provided reason. If run from within
+// SetUpTest, the individual test being set up will be skipped, and if run
+// from within SetUpSuite, the whole suite is skipped.
+func (c *C) Skip(reason string) {
+ if reason == "" {
+ panic("Missing reason why the test is being skipped")
+ }
+ c.reason = reason
+ c.status = skippedSt
+ c.stopNow()
+}
+
+// -----------------------------------------------------------------------
+// Basic logging.
+
+// GetTestLog returns the current test error output.
+func (c *C) GetTestLog() string {
+ return c.logb.String()
+}
+
+// Log logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Log(args ...interface{}) {
+ c.log(args...)
+}
+
+// Logf logs some information into the test error output.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Logf(format string, args ...interface{}) {
+ c.logf(format, args...)
+}
+
+// Output enables *C to be used as a logger in functions that require only
+// the minimum interface of *log.Logger.
+func (c *C) Output(calldepth int, s string) error {
+ d := time.Now().Sub(c.startTime)
+ msec := d / time.Millisecond
+ sec := d / time.Second
+ min := d / time.Minute
+
+ c.Logf("[LOG] %d:%02d.%03d %s", min, sec%60, msec%1000, s)
+ return nil
+}
+
+// Error logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprint.
+func (c *C) Error(args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+ c.logNewLine()
+ c.Fail()
+}
+
+// Errorf logs an error into the test error output and marks the test as failed.
+// The provided arguments are assembled together into a string with fmt.Sprintf.
+func (c *C) Errorf(format string, args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprintf("Error: "+format, args...))
+ c.logNewLine()
+ c.Fail()
+}
+
+// Fatal logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprint.
+func (c *C) Fatal(args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprint(args...)))
+ c.logNewLine()
+ c.FailNow()
+}
+
+// Fatalf logs an error into the test error output, marks the test as failed, and
+// stops the test execution. The provided arguments are assembled together into
+// a string with fmt.Sprintf.
+func (c *C) Fatalf(format string, args ...interface{}) {
+ c.logCaller(1)
+ c.logString(fmt.Sprint("Error: ", fmt.Sprintf(format, args...)))
+ c.logNewLine()
+ c.FailNow()
+}
+
+// -----------------------------------------------------------------------
+// Generic checks and assertions based on checkers.
+
+// Check verifies if the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution continues.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
+func (c *C) Check(obtained interface{}, checker Checker, args ...interface{}) bool {
+ return c.internalCheck("Check", obtained, checker, args...)
+}
+
+// Assert ensures that the first value matches the expected value according
+// to the provided checker. If they do not match, an error is logged, the
+// test is marked as failed, and the test execution stops.
+//
+// Some checkers may not need the expected argument (e.g. IsNil).
+//
+// Extra arguments provided to the function are logged next to the reported
+// problem when the matching fails.
+func (c *C) Assert(obtained interface{}, checker Checker, args ...interface{}) {
+ if !c.internalCheck("Assert", obtained, checker, args...) {
+ c.stopNow()
+ }
+}
+
+func (c *C) internalCheck(funcName string, obtained interface{}, checker Checker, args ...interface{}) bool {
+ if checker == nil {
+ c.logCaller(2)
+ c.logString(fmt.Sprintf("%s(obtained, nil!?, ...):", funcName))
+ c.logString("Oops.. you've provided a nil checker!")
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+
+ // If the last argument is a bug info, extract it out.
+ var comment CommentInterface
+ if len(args) > 0 {
+ if c, ok := args[len(args)-1].(CommentInterface); ok {
+ comment = c
+ args = args[:len(args)-1]
+ }
+ }
+
+ params := append([]interface{}{obtained}, args...)
+ info := checker.Info()
+
+ if len(params) != len(info.Params) {
+ names := append([]string{info.Params[0], info.Name}, info.Params[1:]...)
+ c.logCaller(2)
+ c.logString(fmt.Sprintf("%s(%s):", funcName, strings.Join(names, ", ")))
+ c.logString(fmt.Sprintf("Wrong number of parameters for %s: want %d, got %d", info.Name, len(names), len(params)+1))
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+
+ // Copy since it may be mutated by Check.
+ names := append([]string{}, info.Params...)
+
+ // Do the actual check.
+ result, error := checker.Check(params, names)
+ if !result || error != "" {
+ c.logCaller(2)
+ for i := 0; i != len(params); i++ {
+ c.logValue(names[i], params[i])
+ }
+ if comment != nil {
+ c.logString(comment.CheckCommentString())
+ }
+ if error != "" {
+ c.logString(error)
+ }
+ c.logNewLine()
+ c.Fail()
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers_test.go
new file mode 100644
index 0000000000..704ee10159
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/helpers_test.go
@@ -0,0 +1,519 @@
+// These tests verify the inner workings of the helper methods associated
+// with check.T.
+
+package check_test
+
+import (
+ "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "os"
+ "reflect"
+ "runtime"
+ "sync"
+)
+
+var helpersS = check.Suite(&HelpersS{})
+
+type HelpersS struct{}
+
+func (s *HelpersS) TestCountSuite(c *check.C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Fake checker and bug info to verify the behavior of Assert() and Check().
+
+type MyChecker struct {
+ info *check.CheckerInfo
+ params []interface{}
+ names []string
+ result bool
+ error string
+}
+
+func (checker *MyChecker) Info() *check.CheckerInfo {
+ if checker.info == nil {
+ return &check.CheckerInfo{Name: "MyChecker", Params: []string{"myobtained", "myexpected"}}
+ }
+ return checker.info
+}
+
+func (checker *MyChecker) Check(params []interface{}, names []string) (bool, string) {
+ rparams := checker.params
+ rnames := checker.names
+ checker.params = append([]interface{}{}, params...)
+ checker.names = append([]string{}, names...)
+ if rparams != nil {
+ copy(params, rparams)
+ }
+ if rnames != nil {
+ copy(names, rnames)
+ }
+ return checker.result, checker.error
+}
+
+type myCommentType string
+
+func (c myCommentType) CheckCommentString() string {
+ return string(c)
+}
+
+func myComment(s string) myCommentType {
+ return myCommentType(s)
+}
+
+// -----------------------------------------------------------------------
+// Ensure a real checker actually works fine.
+
+func (s *HelpersS) TestCheckerInterface(c *check.C) {
+ testHelperSuccess(c, "Check(1, Equals, 1)", true, func() interface{} {
+ return c.Check(1, check.Equals, 1)
+ })
+}
+
+// -----------------------------------------------------------------------
+// Tests for Check(), mostly the same as for Assert() following these.
+
+func (s *HelpersS) TestCheckSucceedWithExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ testHelperSuccess(c, "Check(1, checker, 2)", true, func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestCheckSucceedWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ testHelperSuccess(c, "Check(1, checker)", true, func() interface{} {
+ return c.Check(1, checker)
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestCheckFailWithExpected(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndComment(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2, myComment("Hello world!"))
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithExpectedAndStaticComment(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " // Nice leading comment\\.\n" +
+ " return c\\.Check\\(1, checker, 2\\) // Hello there\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, msg)", false, false, log,
+ func() interface{} {
+ // Nice leading comment.
+ return c.Check(1, checker, 2) // Hello there
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker\\)\n" +
+ "\\.+ myvalue int = 1\n\n"
+ testHelperFailure(c, "Check(1, checker)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker)
+ })
+}
+
+func (s *HelpersS) TestCheckFailWithoutExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myvalue int = 1\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Check(1, checker, msg)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, myComment("Hello world!"))
+ })
+}
+
+func (s *HelpersS) TestCheckWithMissingExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker\\)\n" +
+ "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 2\n\n"
+ testHelperFailure(c, "Check(1, checker, !?)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker)
+ })
+}
+
+func (s *HelpersS) TestCheckWithTooManyExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2, 3\\)\n" +
+ "\\.+ Check\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 4\n\n"
+ testHelperFailure(c, "Check(1, checker, 2, 3)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2, 3)
+ })
+}
+
+func (s *HelpersS) TestCheckWithError(c *check.C) {
+ checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Some not so cool data provided!\n\n"
+ testHelperFailure(c, "Check(1, checker, 2)", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+func (s *HelpersS) TestCheckWithNilChecker(c *check.C) {
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, nil\\)\n" +
+ "\\.+ Check\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+ "\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+ testHelperFailure(c, "Check(obtained, nil)", false, false, log,
+ func() interface{} {
+ return c.Check(1, nil)
+ })
+}
+
+func (s *HelpersS) TestCheckWithParamsAndNamesMutation(c *check.C) {
+ checker := &MyChecker{result: false, params: []interface{}{3, 4}, names: []string{"newobtained", "newexpected"}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " return c\\.Check\\(1, checker, 2\\)\n" +
+ "\\.+ newobtained int = 3\n" +
+ "\\.+ newexpected int = 4\n\n"
+ testHelperFailure(c, "Check(1, checker, 2) with mutation", false, false, log,
+ func() interface{} {
+ return c.Check(1, checker, 2)
+ })
+}
+
+// -----------------------------------------------------------------------
+// Tests for Assert(), mostly the same as for Check() above.
+
+func (s *HelpersS) TestAssertSucceedWithExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ testHelperSuccess(c, "Assert(1, checker, 2)", nil, func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1, 2}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestAssertSucceedWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: true, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ testHelperSuccess(c, "Assert(1, checker)", nil, func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+ if !reflect.DeepEqual(checker.params, []interface{}{1}) {
+ c.Fatalf("Bad params for check: %#v", checker.params)
+ }
+}
+
+func (s *HelpersS) TestAssertFailWithExpected(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2, msg)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2, myComment("Hello world!"))
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpected(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker\\)\n" +
+ "\\.+ myvalue int = 1\n\n"
+ testHelperFailure(c, "Assert(1, checker)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertFailWithoutExpectedAndMessage(c *check.C) {
+ checker := &MyChecker{result: false, info: &check.CheckerInfo{Params: []string{"myvalue"}}}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, myComment\\(\"Hello world!\"\\)\\)\n" +
+ "\\.+ myvalue int = 1\n" +
+ "\\.+ Hello world!\n\n"
+ testHelperFailure(c, "Assert(1, checker, msg)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, myComment("Hello world!"))
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithMissingExpected(c *check.C) {
+ checker := &MyChecker{result: true}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker\\)\n" +
+ "\\.+ Assert\\(myobtained, MyChecker, myexpected\\):\n" +
+ "\\.+ Wrong number of parameters for MyChecker: " +
+ "want 3, got 2\n\n"
+ testHelperFailure(c, "Assert(1, checker, !?)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithError(c *check.C) {
+ checker := &MyChecker{result: false, error: "Some not so cool data provided!"}
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, checker, 2\\)\n" +
+ "\\.+ myobtained int = 1\n" +
+ "\\.+ myexpected int = 2\n" +
+ "\\.+ Some not so cool data provided!\n\n"
+ testHelperFailure(c, "Assert(1, checker, 2)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, checker, 2)
+ return nil
+ })
+}
+
+func (s *HelpersS) TestAssertWithNilChecker(c *check.C) {
+ log := "(?s)helpers_test\\.go:[0-9]+:.*\nhelpers_test\\.go:[0-9]+:\n" +
+ " c\\.Assert\\(1, nil\\)\n" +
+ "\\.+ Assert\\(obtained, nil!\\?, \\.\\.\\.\\):\n" +
+ "\\.+ Oops\\.\\. you've provided a nil checker!\n\n"
+ testHelperFailure(c, "Assert(obtained, nil)", nil, true, log,
+ func() interface{} {
+ c.Assert(1, nil)
+ return nil
+ })
+}
+
+// -----------------------------------------------------------------------
+// Ensure that values logged work properly in some interesting cases.
+
+func (s *HelpersS) TestValueLoggingWithArrays(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\\[\\]byte{1, 2}, checker, \\[\\]byte{1, 3}\\)\n" +
+ "\\.+ myobtained \\[\\]uint8 = \\[\\]byte{0x1, 0x2}\n" +
+ "\\.+ myexpected \\[\\]uint8 = \\[\\]byte{0x1, 0x3}\n\n"
+ testHelperFailure(c, "Check([]byte{1}, chk, []byte{3})", false, false, log,
+ func() interface{} {
+ return c.Check([]byte{1, 2}, checker, []byte{1, 3})
+ })
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLine(c *check.C) {
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\"a\\\\nb\\\\n\", checker, \"a\\\\nb\\\\nc\"\\)\n" +
+ "\\.+ myobtained string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\\\\n\"\n" +
+ "\\.+ myexpected string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\\\\n\" \\+\n" +
+ "\\.+ \"c\"\n\n"
+ testHelperFailure(c, `Check("a\nb\n", chk, "a\nb\nc")`, false, false, log,
+ func() interface{} {
+ return c.Check("a\nb\n", checker, "a\nb\nc")
+ })
+}
+
+func (s *HelpersS) TestValueLoggingWithMultiLineException(c *check.C) {
+ // If the newline is at the end of the string, don't log as multi-line.
+ checker := &MyChecker{result: false}
+ log := "(?s)helpers_test.go:[0-9]+:.*\nhelpers_test.go:[0-9]+:\n" +
+ " return c\\.Check\\(\"a b\\\\n\", checker, \"a\\\\nb\"\\)\n" +
+ "\\.+ myobtained string = \"a b\\\\n\"\n" +
+ "\\.+ myexpected string = \"\" \\+\n" +
+ "\\.+ \"a\\\\n\" \\+\n" +
+ "\\.+ \"b\"\n\n"
+ testHelperFailure(c, `Check("a b\n", chk, "a\nb")`, false, false, log,
+ func() interface{} {
+ return c.Check("a b\n", checker, "a\nb")
+ })
+}
+
+// -----------------------------------------------------------------------
+// MakeDir() tests.
+
+type MkDirHelper struct {
+ path1 string
+ path2 string
+ isDir1 bool
+ isDir2 bool
+ isDir3 bool
+ isDir4 bool
+}
+
+func (s *MkDirHelper) SetUpSuite(c *check.C) {
+ s.path1 = c.MkDir()
+ s.isDir1 = isDir(s.path1)
+}
+
+func (s *MkDirHelper) Test(c *check.C) {
+ s.path2 = c.MkDir()
+ s.isDir2 = isDir(s.path2)
+}
+
+func (s *MkDirHelper) TearDownSuite(c *check.C) {
+ s.isDir3 = isDir(s.path1)
+ s.isDir4 = isDir(s.path2)
+}
+
+func (s *HelpersS) TestMkDir(c *check.C) {
+ helper := MkDirHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+ c.Assert(output.value, check.Equals, "")
+ c.Check(helper.isDir1, check.Equals, true)
+ c.Check(helper.isDir2, check.Equals, true)
+ c.Check(helper.isDir3, check.Equals, true)
+ c.Check(helper.isDir4, check.Equals, true)
+ c.Check(helper.path1, check.Not(check.Equals),
+ helper.path2)
+ c.Check(isDir(helper.path1), check.Equals, false)
+ c.Check(isDir(helper.path2), check.Equals, false)
+}
+
+func isDir(path string) bool {
+ if stat, err := os.Stat(path); err == nil {
+ return stat.IsDir()
+ }
+ return false
+}
+
+// Concurrent logging should not corrupt the underlying buffer.
+// Use go test -race to detect the race in this test.
+func (s *HelpersS) TestConcurrentLogging(c *check.C) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+ var start, stop sync.WaitGroup
+ start.Add(1)
+ for i, n := 0, runtime.NumCPU()*2; i < n; i++ {
+ stop.Add(1)
+ go func(i int) {
+ start.Wait()
+ for j := 0; j < 30; j++ {
+ c.Logf("Worker %d: line %d", i, j)
+ }
+ stop.Done()
+ }(i)
+ }
+ start.Done()
+ stop.Wait()
+}
+
+// -----------------------------------------------------------------------
+// Test the TestName function
+
+type TestNameHelper struct {
+ name1 string
+ name2 string
+ name3 string
+ name4 string
+ name5 string
+}
+
+func (s *TestNameHelper) SetUpSuite(c *check.C) { s.name1 = c.TestName() }
+func (s *TestNameHelper) SetUpTest(c *check.C) { s.name2 = c.TestName() }
+func (s *TestNameHelper) Test(c *check.C) { s.name3 = c.TestName() }
+func (s *TestNameHelper) TearDownTest(c *check.C) { s.name4 = c.TestName() }
+func (s *TestNameHelper) TearDownSuite(c *check.C) { s.name5 = c.TestName() }
+
+func (s *HelpersS) TestTestName(c *check.C) {
+ helper := TestNameHelper{}
+ output := String{}
+ check.Run(&helper, &check.RunConf{Output: &output})
+ c.Check(helper.name1, check.Equals, "")
+ c.Check(helper.name2, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name3, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name4, check.Equals, "TestNameHelper.Test")
+ c.Check(helper.name5, check.Equals, "")
+}
+
+// -----------------------------------------------------------------------
+// A couple of helper functions to test helper functions. :-)
+
+func testHelperSuccess(c *check.C, name string, expectedResult interface{}, closure func() interface{}) {
+ var result interface{}
+ defer (func() {
+ if err := recover(); err != nil {
+ panic(err)
+ }
+ checkState(c, result,
+ &expectedState{
+ name: name,
+ result: expectedResult,
+ failed: false,
+ log: "",
+ })
+ })()
+ result = closure()
+}
+
+func testHelperFailure(c *check.C, name string, expectedResult interface{}, shouldStop bool, log string, closure func() interface{}) {
+ var result interface{}
+ defer (func() {
+ if err := recover(); err != nil {
+ panic(err)
+ }
+ checkState(c, result,
+ &expectedState{
+ name: name,
+ result: expectedResult,
+ failed: true,
+ log: log,
+ })
+ })()
+ result = closure()
+ if shouldStop {
+ c.Logf("%s didn't stop when it should", name)
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer.go
new file mode 100644
index 0000000000..e0f7557b5c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer.go
@@ -0,0 +1,168 @@
+package check
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "os"
+)
+
+func indent(s, with string) (r string) {
+ eol := true
+ for i := 0; i != len(s); i++ {
+ c := s[i]
+ switch {
+ case eol && c == '\n' || c == '\r':
+ case c == '\n' || c == '\r':
+ eol = true
+ case eol:
+ eol = false
+ s = s[:i] + with + s[i:]
+ i += len(with)
+ }
+ }
+ return s
+}
+
+func printLine(filename string, line int) (string, error) {
+ fset := token.NewFileSet()
+ file, err := os.Open(filename)
+ if err != nil {
+ return "", err
+ }
+ fnode, err := parser.ParseFile(fset, filename, file, parser.ParseComments)
+ if err != nil {
+ return "", err
+ }
+ config := &printer.Config{Mode: printer.UseSpaces, Tabwidth: 4}
+ lp := &linePrinter{fset: fset, fnode: fnode, line: line, config: config}
+ ast.Walk(lp, fnode)
+ result := lp.output.Bytes()
+ // Comments leave \n at the end.
+ n := len(result)
+ for n > 0 && result[n-1] == '\n' {
+ n--
+ }
+ return string(result[:n]), nil
+}
+
+type linePrinter struct {
+ config *printer.Config
+ fset *token.FileSet
+ fnode *ast.File
+ line int
+ output bytes.Buffer
+ stmt ast.Stmt
+}
+
+func (lp *linePrinter) emit() bool {
+ if lp.stmt != nil {
+ lp.trim(lp.stmt)
+ lp.printWithComments(lp.stmt)
+ lp.stmt = nil
+ return true
+ }
+ return false
+}
+
+func (lp *linePrinter) printWithComments(n ast.Node) {
+ nfirst := lp.fset.Position(n.Pos()).Line
+ nlast := lp.fset.Position(n.End()).Line
+ for _, g := range lp.fnode.Comments {
+ cfirst := lp.fset.Position(g.Pos()).Line
+ clast := lp.fset.Position(g.End()).Line
+ if clast == nfirst-1 && lp.fset.Position(n.Pos()).Column == lp.fset.Position(g.Pos()).Column {
+ for _, c := range g.List {
+ lp.output.WriteString(c.Text)
+ lp.output.WriteByte('\n')
+ }
+ }
+ if cfirst >= nfirst && cfirst <= nlast && n.End() <= g.List[0].Slash {
+ // The printer will not include the comment if it starts past
+ // the node itself. Trick it into printing by overlapping the
+ // slash with the end of the statement.
+ g.List[0].Slash = n.End() - 1
+ }
+ }
+ node := &printer.CommentedNode{n, lp.fnode.Comments}
+ lp.config.Fprint(&lp.output, lp.fset, node)
+}
+
+func (lp *linePrinter) Visit(n ast.Node) (w ast.Visitor) {
+ if n == nil {
+ if lp.output.Len() == 0 {
+ lp.emit()
+ }
+ return nil
+ }
+ first := lp.fset.Position(n.Pos()).Line
+ last := lp.fset.Position(n.End()).Line
+ if first <= lp.line && last >= lp.line {
+ // Print the innermost statement containing the line.
+ if stmt, ok := n.(ast.Stmt); ok {
+ if _, ok := n.(*ast.BlockStmt); !ok {
+ lp.stmt = stmt
+ }
+ }
+ if first == lp.line && lp.emit() {
+ return nil
+ }
+ return lp
+ }
+ return nil
+}
+
+func (lp *linePrinter) trim(n ast.Node) bool {
+ stmt, ok := n.(ast.Stmt)
+ if !ok {
+ return true
+ }
+ line := lp.fset.Position(n.Pos()).Line
+ if line != lp.line {
+ return false
+ }
+ switch stmt := stmt.(type) {
+ case *ast.IfStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.SwitchStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.TypeSwitchStmt:
+ stmt.Body = lp.trimBlock(stmt.Body)
+ case *ast.CaseClause:
+ stmt.Body = lp.trimList(stmt.Body)
+ case *ast.CommClause:
+ stmt.Body = lp.trimList(stmt.Body)
+ case *ast.BlockStmt:
+ stmt.List = lp.trimList(stmt.List)
+ }
+ return true
+}
+
+func (lp *linePrinter) trimBlock(stmt *ast.BlockStmt) *ast.BlockStmt {
+ if !lp.trim(stmt) {
+ return lp.emptyBlock(stmt)
+ }
+ stmt.Rbrace = stmt.Lbrace
+ return stmt
+}
+
+func (lp *linePrinter) trimList(stmts []ast.Stmt) []ast.Stmt {
+ for i := 0; i != len(stmts); i++ {
+ if !lp.trim(stmts[i]) {
+ stmts[i] = lp.emptyStmt(stmts[i])
+ break
+ }
+ }
+ return stmts
+}
+
+func (lp *linePrinter) emptyStmt(n ast.Node) *ast.ExprStmt {
+ return &ast.ExprStmt{&ast.Ellipsis{n.Pos(), nil}}
+}
+
+func (lp *linePrinter) emptyBlock(n ast.Node) *ast.BlockStmt {
+ p := n.Pos()
+ return &ast.BlockStmt{p, []ast.Stmt{lp.emptyStmt(n)}, p}
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer_test.go
new file mode 100644
index 0000000000..47f94a7fa8
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/printer_test.go
@@ -0,0 +1,109 @@
+package check_test
+
+import (
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+)
+
+var _ = Suite(&PrinterS{})
+
+type PrinterS struct{}
+
+func (s *PrinterS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+var printTestFuncLine int
+
+func init() {
+ printTestFuncLine = getMyLine() + 3
+}
+
+func printTestFunc() {
+ println(1) // Comment1
+ if 2 == 2 { // Comment2
+ println(3) // Comment3
+ }
+ switch 5 {
+ case 6:
+ println(6) // Comment6
+ println(7)
+ }
+ switch interface{}(9).(type) { // Comment9
+ case int:
+ println(10)
+ println(11)
+ }
+ select {
+ case <-(chan bool)(nil):
+ println(14)
+ println(15)
+ default:
+ println(16)
+ println(17)
+ }
+ println(19,
+ 20)
+ _ = func() {
+ println(21)
+ println(22)
+ }
+ println(24, func() {
+ println(25)
+ })
+ // Leading comment
+ // with multiple lines.
+ println(29) // Comment29
+}
+
+var printLineTests = []struct {
+ line int
+ output string
+}{
+ {1, "println(1) // Comment1"},
+ {2, "if 2 == 2 { // Comment2\n ...\n}"},
+ {3, "println(3) // Comment3"},
+ {5, "switch 5 {\n...\n}"},
+ {6, "case 6:\n println(6) // Comment6\n ..."},
+ {7, "println(7)"},
+ {9, "switch interface{}(9).(type) { // Comment9\n...\n}"},
+ {10, "case int:\n println(10)\n ..."},
+ {14, "case <-(chan bool)(nil):\n println(14)\n ..."},
+ {15, "println(15)"},
+ {16, "default:\n println(16)\n ..."},
+ {17, "println(17)"},
+ {19, "println(19,\n 20)"},
+ {20, "println(19,\n 20)"},
+ {21, "_ = func() {\n println(21)\n println(22)\n}"},
+ {22, "println(22)"},
+ {24, "println(24, func() {\n println(25)\n})"},
+ {25, "println(25)"},
+ {26, "println(24, func() {\n println(25)\n})"},
+ {29, "// Leading comment\n// with multiple lines.\nprintln(29) // Comment29"},
+}
+
+func (s *PrinterS) TestPrintLine(c *C) {
+ for _, test := range printLineTests {
+ output, err := PrintLine("printer_test.go", printTestFuncLine+test.line)
+ c.Assert(err, IsNil)
+ c.Assert(output, Equals, test.output)
+ }
+}
+
+var indentTests = []struct {
+ in, out string
+}{
+ {"", ""},
+ {"\n", "\n"},
+ {"a", ">>>a"},
+ {"a\n", ">>>a\n"},
+ {"a\nb", ">>>a\n>>>b"},
+ {" ", ">>> "},
+}
+
+func (s *PrinterS) TestIndent(c *C) {
+ for _, test := range indentTests {
+ out := Indent(test.in, ">>>")
+ c.Assert(out, Equals, test.out)
+ }
+
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run.go
new file mode 100644
index 0000000000..da8fd79872
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run.go
@@ -0,0 +1,175 @@
+package check
+
+import (
+ "bufio"
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+)
+
+// -----------------------------------------------------------------------
+// Test suite registry.
+
+var allSuites []interface{}
+
+// Suite registers the given value as a test suite to be run. Any methods
+// starting with the Test prefix in the given value will be considered as
+// a test method.
+func Suite(suite interface{}) interface{} {
+ allSuites = append(allSuites, suite)
+ return suite
+}
+
+// -----------------------------------------------------------------------
+// Public running interface.
+
+var (
+ oldFilterFlag = flag.String("gocheck.f", "", "Regular expression selecting which tests and/or suites to run")
+ oldVerboseFlag = flag.Bool("gocheck.v", false, "Verbose mode")
+ oldStreamFlag = flag.Bool("gocheck.vv", false, "Super verbose mode (disables output caching)")
+ oldBenchFlag = flag.Bool("gocheck.b", false, "Run benchmarks")
+ oldBenchTime = flag.Duration("gocheck.btime", 1*time.Second, "approximate run time for each benchmark")
+ oldListFlag = flag.Bool("gocheck.list", false, "List the names of all tests that will be run")
+ oldWorkFlag = flag.Bool("gocheck.work", false, "Display and do not remove the test working directory")
+
+ newFilterFlag = flag.String("check.f", "", "Regular expression selecting which tests and/or suites to run")
+ newVerboseFlag = flag.Bool("check.v", false, "Verbose mode")
+ newStreamFlag = flag.Bool("check.vv", false, "Super verbose mode (disables output caching)")
+ newBenchFlag = flag.Bool("check.b", false, "Run benchmarks")
+ newBenchTime = flag.Duration("check.btime", 1*time.Second, "approximate run time for each benchmark")
+ newBenchMem = flag.Bool("check.bmem", false, "Report memory benchmarks")
+ newListFlag = flag.Bool("check.list", false, "List the names of all tests that will be run")
+ newWorkFlag = flag.Bool("check.work", false, "Display and do not remove the test working directory")
+)
+
+// TestingT runs all test suites registered with the Suite function,
+// printing results to stdout, and reporting any failures back to
+// the "testing" package.
+func TestingT(testingT *testing.T) {
+ benchTime := *newBenchTime
+ if benchTime == 1*time.Second {
+ benchTime = *oldBenchTime
+ }
+ conf := &RunConf{
+ Filter: *oldFilterFlag + *newFilterFlag,
+ Verbose: *oldVerboseFlag || *newVerboseFlag,
+ Stream: *oldStreamFlag || *newStreamFlag,
+ Benchmark: *oldBenchFlag || *newBenchFlag,
+ BenchmarkTime: benchTime,
+ BenchmarkMem: *newBenchMem,
+ KeepWorkDir: *oldWorkFlag || *newWorkFlag,
+ }
+ if *oldListFlag || *newListFlag {
+ w := bufio.NewWriter(os.Stdout)
+ for _, name := range ListAll(conf) {
+ fmt.Fprintln(w, name)
+ }
+ w.Flush()
+ return
+ }
+ result := RunAll(conf)
+ println(result.String())
+ if !result.Passed() {
+ testingT.Fail()
+ }
+}
+
+// RunAll runs all test suites registered with the Suite function, using the
+// provided run configuration.
+func RunAll(runConf *RunConf) *Result {
+ result := Result{}
+ for _, suite := range allSuites {
+ result.Add(Run(suite, runConf))
+ }
+ return &result
+}
+
+// Run runs the provided test suite using the provided run configuration.
+func Run(suite interface{}, runConf *RunConf) *Result {
+ runner := newSuiteRunner(suite, runConf)
+ return runner.run()
+}
+
+// ListAll returns the names of all the test functions registered with the
+// Suite function that will be run with the provided run configuration.
+func ListAll(runConf *RunConf) []string {
+ var names []string
+ for _, suite := range allSuites {
+ names = append(names, List(suite, runConf)...)
+ }
+ return names
+}
+
+// List returns the names of the test functions in the given
+// suite that will be run with the provided run configuration.
+func List(suite interface{}, runConf *RunConf) []string {
+ var names []string
+ runner := newSuiteRunner(suite, runConf)
+ for _, t := range runner.tests {
+ names = append(names, t.String())
+ }
+ return names
+}
+
+// -----------------------------------------------------------------------
+// Result methods.
+
+func (r *Result) Add(other *Result) {
+ r.Succeeded += other.Succeeded
+ r.Skipped += other.Skipped
+ r.Failed += other.Failed
+ r.Panicked += other.Panicked
+ r.FixturePanicked += other.FixturePanicked
+ r.ExpectedFailures += other.ExpectedFailures
+ r.Missed += other.Missed
+ if r.WorkDir != "" && other.WorkDir != "" {
+ r.WorkDir += ":" + other.WorkDir
+ } else if other.WorkDir != "" {
+ r.WorkDir = other.WorkDir
+ }
+}
+
+func (r *Result) Passed() bool {
+ return (r.Failed == 0 && r.Panicked == 0 &&
+ r.FixturePanicked == 0 && r.Missed == 0 &&
+ r.RunError == nil)
+}
+
+func (r *Result) String() string {
+ if r.RunError != nil {
+ return "ERROR: " + r.RunError.Error()
+ }
+
+ var value string
+ if r.Failed == 0 && r.Panicked == 0 && r.FixturePanicked == 0 &&
+ r.Missed == 0 {
+ value = "OK: "
+ } else {
+ value = "OOPS: "
+ }
+ value += fmt.Sprintf("%d passed", r.Succeeded)
+ if r.Skipped != 0 {
+ value += fmt.Sprintf(", %d skipped", r.Skipped)
+ }
+ if r.ExpectedFailures != 0 {
+ value += fmt.Sprintf(", %d expected failures", r.ExpectedFailures)
+ }
+ if r.Failed != 0 {
+ value += fmt.Sprintf(", %d FAILED", r.Failed)
+ }
+ if r.Panicked != 0 {
+ value += fmt.Sprintf(", %d PANICKED", r.Panicked)
+ }
+ if r.FixturePanicked != 0 {
+ value += fmt.Sprintf(", %d FIXTURE-PANICKED", r.FixturePanicked)
+ }
+ if r.Missed != 0 {
+ value += fmt.Sprintf(", %d MISSED", r.Missed)
+ }
+ if r.WorkDir != "" {
+ value += "\nWORK=" + r.WorkDir
+ }
+ return value
+}
diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run_test.go b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run_test.go
new file mode 100644
index 0000000000..4444cacf9c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/run_test.go
@@ -0,0 +1,419 @@
+// These tests verify the test running logic.
+
+package check_test
+
+import (
+ "errors"
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+ "os"
+ "sync"
+)
+
+var runnerS = Suite(&RunS{})
+
+type RunS struct{}
+
+func (s *RunS) TestCountSuite(c *C) {
+ suitesRun += 1
+}
+
+// -----------------------------------------------------------------------
+// Tests ensuring result counting works properly.
+
+func (s *RunS) TestSuccess(c *C) {
+ output := String{}
+ result := Run(&SuccessHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 1)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFailure(c *C) {
+ output := String{}
+ result := Run(&FailHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 1)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestFixture(c *C) {
+ output := String{}
+ result := Run(&FixtureHelper{}, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 2)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnTest(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "Test1"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 1)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 1)
+ c.Check(result.FixturePanicked, Equals, 0)
+ c.Check(result.Missed, Equals, 0)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpTest(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "SetUpTest"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 1)
+ c.Check(result.Missed, Equals, 2)
+ c.Check(result.RunError, IsNil)
+}
+
+func (s *RunS) TestPanicOnSetUpSuite(c *C) {
+ output := String{}
+ helper := &FixtureHelper{panicOn: "SetUpSuite"}
+ result := Run(helper, &RunConf{Output: &output})
+ c.Check(result.Succeeded, Equals, 0)
+ c.Check(result.Failed, Equals, 0)
+ c.Check(result.Skipped, Equals, 0)
+ c.Check(result.Panicked, Equals, 0)
+ c.Check(result.FixturePanicked, Equals, 1)
+ c.Check(result.Missed, Equals, 2)
+ c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check result aggregation.
+
+func (s *RunS) TestAdd(c *C) {
+ result := &Result{
+ Succeeded: 1,
+ Skipped: 2,
+ Failed: 3,
+ Panicked: 4,
+ FixturePanicked: 5,
+ Missed: 6,
+ ExpectedFailures: 7,
+ }
+ result.Add(&Result{
+ Succeeded: 10,
+ Skipped: 20,
+ Failed: 30,
+ Panicked: 40,
+ FixturePanicked: 50,
+ Missed: 60,
+ ExpectedFailures: 70,
+ })
+ c.Check(result.Succeeded, Equals, 11)
+ c.Check(result.Skipped, Equals, 22)
+ c.Check(result.Failed, Equals, 33)
+ c.Check(result.Panicked, Equals, 44)
+ c.Check(result.FixturePanicked, Equals, 55)
+ c.Check(result.Missed, Equals, 66)
+ c.Check(result.ExpectedFailures, Equals, 77)
+ c.Check(result.RunError, IsNil)
+}
+
+// -----------------------------------------------------------------------
+// Check the Passed() method.
+
+func (s *RunS) TestPassed(c *C) {
+ c.Assert((&Result{}).Passed(), Equals, true)
+ c.Assert((&Result{Succeeded: 1}).Passed(), Equals, true)
+ c.Assert((&Result{Skipped: 1}).Passed(), Equals, true)
+ c.Assert((&Result{Failed: 1}).Passed(), Equals, false)
+ c.Assert((&Result{Panicked: 1}).Passed(), Equals, false)
+ c.Assert((&Result{FixturePanicked: 1}).Passed(), Equals, false)
+ c.Assert((&Result{Missed: 1}).Passed(), Equals, false)
+ c.Assert((&Result{RunError: errors.New("!")}).Passed(), Equals, false)
+}
+
+// -----------------------------------------------------------------------
+// Check that result printing is working correctly.
+
+func (s *RunS) TestPrintSuccess(c *C) {
+ result := &Result{Succeeded: 5}
+ c.Check(result.String(), Equals, "OK: 5 passed")
+}
+
+func (s *RunS) TestPrintFailure(c *C) {
+ result := &Result{Failed: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FAILED")
+}
+
+func (s *RunS) TestPrintSkipped(c *C) {
+ result := &Result{Skipped: 5}
+ c.Check(result.String(), Equals, "OK: 0 passed, 5 skipped")
+}
+
+func (s *RunS) TestPrintExpectedFailures(c *C) {
+ result := &Result{ExpectedFailures: 5}
+ c.Check(result.String(), Equals, "OK: 0 passed, 5 expected failures")
+}
+
+func (s *RunS) TestPrintPanicked(c *C) {
+ result := &Result{Panicked: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 PANICKED")
+}
+
+func (s *RunS) TestPrintFixturePanicked(c *C) {
+ result := &Result{FixturePanicked: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 FIXTURE-PANICKED")
+}
+
+func (s *RunS) TestPrintMissed(c *C) {
+ result := &Result{Missed: 5}
+ c.Check(result.String(), Equals, "OOPS: 0 passed, 5 MISSED")
+}
+
+func (s *RunS) TestPrintAll(c *C) {
+ result := &Result{Succeeded: 1, Skipped: 2, ExpectedFailures: 3,
+ Panicked: 4, FixturePanicked: 5, Missed: 6}
+ c.Check(result.String(), Equals,
+ "OOPS: 1 passed, 2 skipped, 3 expected failures, 4 PANICKED, "+
+ "5 FIXTURE-PANICKED, 6 MISSED")
+}
+
+func (s *RunS) TestPrintRunError(c *C) {
+ result := &Result{Succeeded: 1, Failed: 1,
+ RunError: errors.New("Kaboom!")}
+ c.Check(result.String(), Equals, "ERROR: Kaboom!")
+}
+
+// -----------------------------------------------------------------------
+// Verify that the method pattern flag works correctly.
+
+func (s *RunS) TestFilterTestName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "Test[91]"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterTestNameWithAll(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: ".*"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "FixtureHelper"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test1")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "SetUpTest")
+ c.Check(helper.calls[5], Equals, "Test2")
+ c.Check(helper.calls[6], Equals, "TearDownTest")
+ c.Check(helper.calls[7], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterSuiteNameAndTestName(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "FixtureHelper\\.Test2"}
+ Run(&helper, &runConf)
+ c.Check(helper.calls[0], Equals, "SetUpSuite")
+ c.Check(helper.calls[1], Equals, "SetUpTest")
+ c.Check(helper.calls[2], Equals, "Test2")
+ c.Check(helper.calls[3], Equals, "TearDownTest")
+ c.Check(helper.calls[4], Equals, "TearDownSuite")
+ c.Check(len(helper.calls), Equals, 5)
+}
+
+func (s *RunS) TestFilterAllOut(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "NotFound"}
+ Run(&helper, &runConf)
+ c.Check(len(helper.calls), Equals, 0)
+}
+
+func (s *RunS) TestRequirePartialMatch(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "est"}
+ Run(&helper, &runConf)
+ c.Check(len(helper.calls), Equals, 8)
+}
+
+func (s *RunS) TestFilterError(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Filter: "]["}
+ result := Run(&helper, &runConf)
+ c.Check(result.String(), Equals,
+ "ERROR: Bad filter expression: error parsing regexp: missing closing ]: `[`")
+ c.Check(len(helper.calls), Equals, 0)
+}
+
+// -----------------------------------------------------------------------
+// Verify that List works correctly.
+
+func (s *RunS) TestListFiltered(c *C) {
+ names := List(&FixtureHelper{}, &RunConf{Filter: "1"})
+ c.Assert(names, DeepEquals, []string{
+ "FixtureHelper.Test1",
+ })
+}
+
+func (s *RunS) TestList(c *C) {
+ names := List(&FixtureHelper{}, &RunConf{})
+ c.Assert(names, DeepEquals, []string{
+ "FixtureHelper.Test1",
+ "FixtureHelper.Test2",
+ })
+}
+
+// -----------------------------------------------------------------------
+// Verify that verbose mode prints tests which pass as well.
+
+func (s *RunS) TestVerboseMode(c *C) {
+ helper := FixtureHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test1\t *[.0-9]+s\n" +
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+func (s *RunS) TestVerboseModeWithFailBeforePass(c *C) {
+ helper := FixtureHelper{panicOn: "Test1"}
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true}
+ Run(&helper, &runConf)
+
+ expected := "(?s).*PANIC.*\n-+\n" + // Should have an extra line.
+ "PASS: check_test\\.go:[0-9]+: FixtureHelper\\.Test2\t *[.0-9]+s\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify the stream output mode. In this mode there's no output caching.
+
+type StreamHelper struct {
+ l2 sync.Mutex
+ l3 sync.Mutex
+}
+
+func (s *StreamHelper) SetUpSuite(c *C) {
+ c.Log("0")
+}
+
+func (s *StreamHelper) Test1(c *C) {
+ c.Log("1")
+ s.l2.Lock()
+ s.l3.Lock()
+ go func() {
+ s.l2.Lock() // Wait for "2".
+ c.Log("3")
+ s.l3.Unlock()
+ }()
+}
+
+func (s *StreamHelper) Test2(c *C) {
+ c.Log("2")
+ s.l2.Unlock()
+ s.l3.Lock() // Wait for "3".
+ c.Fail()
+ c.Log("4")
+}
+
+func (s *RunS) TestStreamMode(c *C) {
+ helper := &StreamHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(helper, &runConf)
+
+ expected := "START: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\n0\n" +
+ "PASS: run_test\\.go:[0-9]+: StreamHelper\\.SetUpSuite\t *[.0-9]+s\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamHelper\\.Test1\n1\n" +
+ "PASS: run_test\\.go:[0-9]+: StreamHelper\\.Test1\t *[.0-9]+s\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n2\n3\n4\n" +
+ "FAIL: run_test\\.go:[0-9]+: StreamHelper\\.Test2\n\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+type StreamMissHelper struct{}
+
+func (s *StreamMissHelper) SetUpSuite(c *C) {
+ c.Log("0")
+ c.Fail()
+}
+
+func (s *StreamMissHelper) Test1(c *C) {
+ c.Log("1")
+}
+
+func (s *RunS) TestStreamModeWithMiss(c *C) {
+ helper := &StreamMissHelper{}
+ output := String{}
+ runConf := RunConf{Output: &output, Stream: true}
+ Run(helper, &runConf)
+
+ expected := "START: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n0\n" +
+ "FAIL: run_test\\.go:[0-9]+: StreamMissHelper\\.SetUpSuite\n\n" +
+ "START: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n" +
+ "MISS: run_test\\.go:[0-9]+: StreamMissHelper\\.Test1\n\n"
+
+ c.Assert(output.value, Matches, expected)
+}
+
+// -----------------------------------------------------------------------
+// Verify that the keep work dir request indeed does so.
+
+type WorkDirSuite struct{}
+
+func (s *WorkDirSuite) Test(c *C) {
+ c.MkDir()
+}
+
+func (s *RunS) TestKeepWorkDir(c *C) {
+ output := String{}
+ runConf := RunConf{Output: &output, Verbose: true, KeepWorkDir: true}
+ result := Run(&WorkDirSuite{}, &runConf)
+
+ c.Assert(result.String(), Matches, ".*\nWORK="+result.WorkDir)
+
+ stat, err := os.Stat(result.WorkDir)
+ c.Assert(err, IsNil)
+ c.Assert(stat.IsDir(), Equals, true)
+}
diff --git a/vendor/github.com/magiconair/properties/benchmark_test.go b/vendor/github.com/magiconair/properties/benchmark_test.go
new file mode 100644
index 0000000000..b2019e1033
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/benchmark_test.go
@@ -0,0 +1,22 @@
+// Copyright 2013-2014 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "testing"
+)
+
+// Benchmarks the decoder by creating a property file with 1000 key/value pairs.
+func BenchmarkLoad(b *testing.B) {
+ input := ""
+ for i := 0; i < 1000; i++ {
+ input += fmt.Sprintf("key%d=value%d\n", i, i)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Load([]byte(input), ISO_8859_1)
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
new file mode 100644
index 0000000000..b989e6397c
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -0,0 +1,290 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses v recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a semicolon separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+// // Field is ignored.
+// Field int `properties:"-"`
+//
+// // Field is assigned value of 'Field'.
+// Field int
+//
+// // Field is assigned value of 'myName'.
+// Field int `properties:"myName"`
+//
+// // Field is assigned value of key 'myName' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:"myName,default=15"`
+//
+// // Field is assigned value of key 'Field' and has a default
+// // value 15 if the key does not exist.
+// Field int `properties:",default=15"`
+//
+// // Field is assigned value of key 'date' and the date
+// // is in format 2006-01-02
+// Field time.Time `properties:"date,layout=2006-01-02"`
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas.
+// Field []string
+//
+// // Field is assigned the non-empty and whitespace trimmed
+// // values of key 'Field' split by commas and has a default
+// // value ["a", "b", "c"] if the key does not exist.
+// Field []string `properties:",default=a;b;c"`
+//
+// // Field is decoded recursively with "Field." as key prefix.
+// Field SomeStruct
+//
+// // Field is decoded recursively with "myName." as key prefix.
+// Field SomeStruct `properties:"myName"`
+//
+// // Field is decoded recursively with "Field." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string
+//
+// // Field is decoded recursively with "myName." as key prefix
+// // and the next dotted element of the key as map key.
+// Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+ t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+ if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+ return fmt.Errorf("not a pointer to struct: %s", t)
+ }
+ if err := dec(p, "", nil, nil, v); err != nil {
+ return err
+ }
+ return nil
+}
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+ t := v.Type()
+
+ // value returns the property value for key or the default if provided.
+ value := func() (string, error) {
+ if val, ok := p.Get(key); ok {
+ return val, nil
+ }
+ if def != nil {
+ return *def, nil
+ }
+ return "", fmt.Errorf("missing required key %s", key)
+ }
+
+ // conv converts a string to a value of the given type.
+ conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+ var v interface{}
+
+ switch {
+ case isDuration(t):
+ v, err = time.ParseDuration(s)
+
+ case isTime(t):
+ layout := opts["layout"]
+ if layout == "" {
+ layout = time.RFC3339
+ }
+ v, err = time.Parse(layout, s)
+
+ case isBool(t):
+ v, err = boolVal(s), nil
+
+ case isString(t):
+ v, err = s, nil
+
+ case isFloat(t):
+ v, err = strconv.ParseFloat(s, 64)
+
+ case isInt(t):
+ v, err = strconv.ParseInt(s, 10, 64)
+
+ case isUint(t):
+ v, err = strconv.ParseUint(s, 10, 64)
+
+ default:
+ return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+ }
+ if err != nil {
+ return reflect.Zero(t), err
+ }
+ return reflect.ValueOf(v).Convert(t), nil
+ }
+
+ // keydef returns the property key and the default value based on the
+ // name of the struct field and the options in the tag.
+ keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+ key, opts := parseTag(f.Tag.Get("properties"))
+
+ var def *string
+ if d, ok := opts["default"]; ok {
+ def = &d
+ }
+ if key != "" {
+ return key, def, opts
+ }
+ return f.Name, def, opts
+ }
+
+ switch {
+ case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+ s, err := value()
+ if err != nil {
+ return err
+ }
+ val, err := conv(s, t)
+ if err != nil {
+ return err
+ }
+ v.Set(val)
+
+ case isPtr(t):
+ return dec(p, key, def, opts, v.Elem())
+
+ case isStruct(t):
+ for i := 0; i < v.NumField(); i++ {
+ fv := v.Field(i)
+ fk, def, opts := keydef(t.Field(i))
+ if !fv.CanSet() {
+			return fmt.Errorf("cannot set %s", t.Field(i).Name)
+ }
+ if fk == "-" {
+ continue
+ }
+ if key != "" {
+ fk = key + "." + fk
+ }
+ if err := dec(p, fk, def, opts, fv); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case isArray(t):
+ val, err := value()
+ if err != nil {
+ return err
+ }
+ vals := split(val, ";")
+ a := reflect.MakeSlice(t, 0, len(vals))
+ for _, s := range vals {
+ val, err := conv(s, t.Elem())
+ if err != nil {
+ return err
+ }
+ a = reflect.Append(a, val)
+ }
+ v.Set(a)
+
+ case isMap(t):
+ valT := t.Elem()
+ m := reflect.MakeMap(t)
+		for postfix := range p.FilterStripPrefix(key + ".").m {
+ pp := strings.SplitN(postfix, ".", 2)
+ mk, mv := pp[0], reflect.New(valT)
+ if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+ return err
+ }
+ m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+ }
+ v.Set(m)
+
+ default:
+ return fmt.Errorf("unsupported type %s", t)
+ }
+ return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+ var a []string
+ for _, v := range strings.Split(s, sep) {
+ if v = strings.TrimSpace(v); v != "" {
+ a = append(a, v)
+ }
+ }
+ return a
+}
+
+// parseTag parses a "key,k=v,k=v,..."
+func parseTag(tag string) (key string, opts map[string]string) {
+ opts = map[string]string{}
+ for i, s := range strings.Split(tag, ",") {
+ if i == 0 {
+ key = s
+ continue
+ }
+
+ pp := strings.SplitN(s, "=", 2)
+ if len(pp) == 1 {
+ opts[pp[0]] = ""
+ } else {
+ opts[pp[0]] = pp[1]
+ }
+ }
+ return key, opts
+}
+
+func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map }
+func isNumeric(t reflect.Type) bool { return isInt(t) || isUint(t) || isFloat(t) }
+func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+ return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+ return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+ return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}
diff --git a/vendor/github.com/magiconair/properties/decode_test.go b/vendor/github.com/magiconair/properties/decode_test.go
new file mode 100644
index 0000000000..a12f1e208b
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode_test.go
@@ -0,0 +1,299 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestDecodeValues(t *testing.T) {
+ type S struct {
+ S string
+ BT bool
+ BF bool
+ I int
+ I8 int8
+ I16 int16
+ I32 int32
+ I64 int64
+ U uint
+ U8 uint8
+ U16 uint16
+ U32 uint32
+ U64 uint64
+ F32 float32
+ F64 float64
+ D time.Duration
+ TM time.Time
+ }
+ in := `
+ S=abc
+ BT=true
+ BF=false
+ I=-1
+ I8=-8
+ I16=-16
+ I32=-32
+ I64=-64
+ U=1
+ U8=8
+ U16=16
+ U32=32
+ U64=64
+ F32=3.2
+ F64=6.4
+ D=5s
+ TM=2015-01-02T12:34:56Z
+ `
+ out := &S{
+ S: "abc",
+ BT: true,
+ BF: false,
+ I: -1,
+ I8: -8,
+ I16: -16,
+ I32: -32,
+ I64: -64,
+ U: 1,
+ U8: 8,
+ U16: 16,
+ U32: 32,
+ U64: 64,
+ F32: 3.2,
+ F64: 6.4,
+ D: 5 * time.Second,
+ TM: tm(t, time.RFC3339, "2015-01-02T12:34:56Z"),
+ }
+ testDecode(t, in, &S{}, out)
+}
+
+func TestDecodeValueDefaults(t *testing.T) {
+ type S struct {
+ S string `properties:",default=abc"`
+ BT bool `properties:",default=true"`
+ BF bool `properties:",default=false"`
+ I int `properties:",default=-1"`
+ I8 int8 `properties:",default=-8"`
+ I16 int16 `properties:",default=-16"`
+ I32 int32 `properties:",default=-32"`
+ I64 int64 `properties:",default=-64"`
+ U uint `properties:",default=1"`
+ U8 uint8 `properties:",default=8"`
+ U16 uint16 `properties:",default=16"`
+ U32 uint32 `properties:",default=32"`
+ U64 uint64 `properties:",default=64"`
+ F32 float32 `properties:",default=3.2"`
+ F64 float64 `properties:",default=6.4"`
+ D time.Duration `properties:",default=5s"`
+ TM time.Time `properties:",default=2015-01-02T12:34:56Z"`
+ }
+ out := &S{
+ S: "abc",
+ BT: true,
+ BF: false,
+ I: -1,
+ I8: -8,
+ I16: -16,
+ I32: -32,
+ I64: -64,
+ U: 1,
+ U8: 8,
+ U16: 16,
+ U32: 32,
+ U64: 64,
+ F32: 3.2,
+ F64: 6.4,
+ D: 5 * time.Second,
+ TM: tm(t, time.RFC3339, "2015-01-02T12:34:56Z"),
+ }
+ testDecode(t, "", &S{}, out)
+}
+
+func TestDecodeArrays(t *testing.T) {
+ type S struct {
+ S []string
+ B []bool
+ I []int
+ I8 []int8
+ I16 []int16
+ I32 []int32
+ I64 []int64
+ U []uint
+ U8 []uint8
+ U16 []uint16
+ U32 []uint32
+ U64 []uint64
+ F32 []float32
+ F64 []float64
+ D []time.Duration
+ TM []time.Time
+ }
+ in := `
+ S=a;b
+ B=true;false
+ I=-1;-2
+ I8=-8;-9
+ I16=-16;-17
+ I32=-32;-33
+ I64=-64;-65
+ U=1;2
+ U8=8;9
+ U16=16;17
+ U32=32;33
+ U64=64;65
+ F32=3.2;3.3
+ F64=6.4;6.5
+ D=4s;5s
+ TM=2015-01-01T00:00:00Z;2016-01-01T00:00:00Z
+ `
+ out := &S{
+ S: []string{"a", "b"},
+ B: []bool{true, false},
+ I: []int{-1, -2},
+ I8: []int8{-8, -9},
+ I16: []int16{-16, -17},
+ I32: []int32{-32, -33},
+ I64: []int64{-64, -65},
+ U: []uint{1, 2},
+ U8: []uint8{8, 9},
+ U16: []uint16{16, 17},
+ U32: []uint32{32, 33},
+ U64: []uint64{64, 65},
+ F32: []float32{3.2, 3.3},
+ F64: []float64{6.4, 6.5},
+ D: []time.Duration{4 * time.Second, 5 * time.Second},
+ TM: []time.Time{tm(t, time.RFC3339, "2015-01-01T00:00:00Z"), tm(t, time.RFC3339, "2016-01-01T00:00:00Z")},
+ }
+ testDecode(t, in, &S{}, out)
+}
+
+func TestDecodeArrayDefaults(t *testing.T) {
+ type S struct {
+ S []string `properties:",default=a;b"`
+ B []bool `properties:",default=true;false"`
+ I []int `properties:",default=-1;-2"`
+ I8 []int8 `properties:",default=-8;-9"`
+ I16 []int16 `properties:",default=-16;-17"`
+ I32 []int32 `properties:",default=-32;-33"`
+ I64 []int64 `properties:",default=-64;-65"`
+ U []uint `properties:",default=1;2"`
+ U8 []uint8 `properties:",default=8;9"`
+ U16 []uint16 `properties:",default=16;17"`
+ U32 []uint32 `properties:",default=32;33"`
+ U64 []uint64 `properties:",default=64;65"`
+ F32 []float32 `properties:",default=3.2;3.3"`
+ F64 []float64 `properties:",default=6.4;6.5"`
+ D []time.Duration `properties:",default=4s;5s"`
+ TM []time.Time `properties:",default=2015-01-01T00:00:00Z;2016-01-01T00:00:00Z"`
+ }
+ out := &S{
+ S: []string{"a", "b"},
+ B: []bool{true, false},
+ I: []int{-1, -2},
+ I8: []int8{-8, -9},
+ I16: []int16{-16, -17},
+ I32: []int32{-32, -33},
+ I64: []int64{-64, -65},
+ U: []uint{1, 2},
+ U8: []uint8{8, 9},
+ U16: []uint16{16, 17},
+ U32: []uint32{32, 33},
+ U64: []uint64{64, 65},
+ F32: []float32{3.2, 3.3},
+ F64: []float64{6.4, 6.5},
+ D: []time.Duration{4 * time.Second, 5 * time.Second},
+ TM: []time.Time{tm(t, time.RFC3339, "2015-01-01T00:00:00Z"), tm(t, time.RFC3339, "2016-01-01T00:00:00Z")},
+ }
+ testDecode(t, "", &S{}, out)
+}
+
+func TestDecodeSkipUndef(t *testing.T) {
+ type S struct {
+ X string `properties:"-"`
+ Undef string `properties:",default=some value"`
+ }
+ in := `X=ignore`
+ out := &S{"", "some value"}
+ testDecode(t, in, &S{}, out)
+}
+
+func TestDecodeStruct(t *testing.T) {
+ type A struct {
+ S string
+ T string `properties:"t"`
+ U string `properties:"u,default=uuu"`
+ }
+ type S struct {
+ A A
+ B A `properties:"b"`
+ }
+ in := `
+ A.S=sss
+ A.t=ttt
+ b.S=SSS
+ b.t=TTT
+ `
+ out := &S{
+ A{S: "sss", T: "ttt", U: "uuu"},
+ A{S: "SSS", T: "TTT", U: "uuu"},
+ }
+ testDecode(t, in, &S{}, out)
+}
+
+func TestDecodeMap(t *testing.T) {
+ type S struct {
+ A string `properties:"a"`
+ }
+ type X struct {
+ A map[string]string
+ B map[string][]string
+ C map[string]map[string]string
+ D map[string]S
+ E map[string]int
+ F map[string]int `properties:"-"`
+ }
+ in := `
+ A.foo=bar
+ A.bar=bang
+ B.foo=a;b;c
+ B.bar=1;2;3
+ C.foo.one=1
+ C.foo.two=2
+ C.bar.three=3
+ C.bar.four=4
+ D.foo.a=bar
+ `
+ out := &X{
+ A: map[string]string{"foo": "bar", "bar": "bang"},
+ B: map[string][]string{"foo": []string{"a", "b", "c"}, "bar": []string{"1", "2", "3"}},
+ C: map[string]map[string]string{"foo": map[string]string{"one": "1", "two": "2"}, "bar": map[string]string{"three": "3", "four": "4"}},
+ D: map[string]S{"foo": S{"bar"}},
+ E: map[string]int{},
+ }
+ testDecode(t, in, &X{}, out)
+}
+
+func testDecode(t *testing.T, in string, v, out interface{}) {
+ p, err := parse(in)
+ if err != nil {
+ t.Fatalf("got %v want nil", err)
+ }
+ if err := p.Decode(v); err != nil {
+ t.Fatalf("got %v want nil", err)
+ }
+ if got, want := v, out; !reflect.DeepEqual(got, want) {
+ t.Fatalf("\ngot %+v\nwant %+v", got, want)
+ }
+}
+
+func tm(t *testing.T, layout, s string) time.Time {
+ tm, err := time.Parse(layout, s)
+ if err != nil {
+ t.Fatalf("got %v want nil", err)
+ }
+ return tm
+}
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
new file mode 100644
index 0000000000..ed1ff510ac
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+// p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+// f1 := "/etc/myapp/myapp.conf"
+// f2 := "/home/${USER}/myapp.conf"
+// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+// ! this is a comment
+// # and so is this
+//
+// # the following expressions are equal
+// key value
+// key=value
+// key:value
+// key = value
+// key : value
+// key = val\
+// ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+// # standard property
+// key = value
+//
+// # property expansion: key2 = value
+// key2 = ${key}
+//
+// # recursive expansion: key3 = value
+// key3 = ${key2}
+//
+// # circular reference (error)
+// key = ${key}
+//
+// # malformed expression (error)
+// key = ${ke
+//
+// # refers to the users' home dir
+// home = ${HOME}
+//
+// # local key takes precedence over env var: u = foo
+// USER = foo
+// u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+// p := properties.NewProperties()
+// p.Prefix = "#["
+// p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+// # Returns true if the value is either "1", "on", "yes" or "true"
+// # Returns false for every other value and the default value if
+// # the key does not exist.
+// v = p.GetBool("key", false)
+//
+// # Returns the value if the key exists and the format conversion
+// # was successful. Otherwise, the default value is returned.
+// v = p.GetInt64("key", 999)
+// v = p.GetUint64("key", 999)
+// v = p.GetFloat64("key", 123.0)
+// v = p.GetString("key", "def")
+// v = p.GetDuration("key", 999)
+//
+// As an alternative, properties may be applied with the standard
+// library's flag implementation at any time.
+//
+// # Standard configuration
+// v = flag.Int("key", 999, "help message")
+// flag.Parse()
+//
+// # Merge p into the flag set
+// p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error set a different ErrorHandler before
+// you use the Properties package.
+//
+// properties.ErrorHandler = properties.PanicHandler
+//
+// # Will panic instead of logging an error
+// p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+// properties.ErrorHandler = func(err error) {
+// fmt.Println(err)
+// os.Exit(1)
+// }
+//
+// # Will write to stdout and then exit
+// p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+// type S struct {
+// A string `properties:"a,default=foo"`
+// D time.Duration `properties:"timeout,default=5s"`
+// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+// }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+//
+package properties
diff --git a/vendor/github.com/magiconair/properties/example_test.go b/vendor/github.com/magiconair/properties/example_test.go
new file mode 100644
index 0000000000..3601217e28
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/example_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "log"
+)
+
+func ExampleLoad_iso88591() {
+ buf := []byte("key = ISO-8859-1 value with unicode literal \\u2318 and umlaut \xE4") // 0xE4 == ä
+ p, _ := Load(buf, ISO_8859_1)
+ v, ok := p.Get("key")
+ fmt.Println(ok)
+ fmt.Println(v)
+ // Output:
+ // true
+ // ISO-8859-1 value with unicode literal ⌘ and umlaut ä
+}
+
+func ExampleLoad_utf8() {
+ p, _ := Load([]byte("key = UTF-8 value with unicode character ⌘ and umlaut ä"), UTF8)
+ v, ok := p.Get("key")
+ fmt.Println(ok)
+ fmt.Println(v)
+ // Output:
+ // true
+ // UTF-8 value with unicode character ⌘ and umlaut ä
+}
+
+func ExampleProperties_GetBool() {
+ var input = `
+ key=1
+ key2=On
+ key3=YES
+ key4=true`
+ p, _ := Load([]byte(input), ISO_8859_1)
+ fmt.Println(p.GetBool("key", false))
+ fmt.Println(p.GetBool("key2", false))
+ fmt.Println(p.GetBool("key3", false))
+ fmt.Println(p.GetBool("key4", false))
+ fmt.Println(p.GetBool("keyX", false))
+ // Output:
+ // true
+ // true
+ // true
+ // true
+ // false
+}
+
+func ExampleProperties_GetString() {
+ p, _ := Load([]byte("key=value"), ISO_8859_1)
+ v := p.GetString("another key", "default value")
+ fmt.Println(v)
+ // Output:
+ // default value
+}
+
+func Example() {
+ // Decode some key/value pairs with expressions
+ p, err := Load([]byte("key=value\nkey2=${key}"), ISO_8859_1)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Get a valid key
+ if v, ok := p.Get("key"); ok {
+ fmt.Println(v)
+ }
+
+ // Get an invalid key
+ if _, ok := p.Get("does not exist"); !ok {
+ fmt.Println("invalid key")
+ }
+
+ // Get a key with a default value
+ v := p.GetString("does not exist", "some value")
+ fmt.Println(v)
+
+ // Dump the expanded key/value pairs of the Properties
+ fmt.Println("Expanded key/value pairs")
+ fmt.Println(p)
+
+ // Output:
+ // value
+ // invalid key
+ // some value
+ // Expanded key/value pairs
+ // key = value
+ // key2 = value
+}
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
new file mode 100644
index 0000000000..37baaad958
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -0,0 +1,34 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets flags that are skipped by dst.Parse when p contains
+// the respective key for flag.Flag.Name.
+//
+// Its use is recommended with command line arguments as in:
+// flag.Parse()
+// p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+ m := make(map[string]*flag.Flag)
+ dst.VisitAll(func(f *flag.Flag) {
+ m[f.Name] = f
+ })
+ dst.Visit(func(f *flag.Flag) {
+ delete(m, f.Name) // overridden
+ })
+
+ for name, f := range m {
+ v, ok := p.Get(name)
+ if !ok {
+ continue
+ }
+
+ if err := f.Value.Set(v); err != nil {
+ ErrorHandler(err)
+ }
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/integrate_test.go b/vendor/github.com/magiconair/properties/integrate_test.go
new file mode 100644
index 0000000000..2daaf8ab6a
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate_test.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "flag"
+ "fmt"
+ "testing"
+)
+
+// TestFlag verifies Properties.MustFlag without flag.FlagSet.Parse
+func TestFlag(t *testing.T) {
+ f := flag.NewFlagSet("src", flag.PanicOnError)
+ gotS := f.String("s", "?", "string flag")
+ gotI := f.Int("i", -1, "int flag")
+
+ p := NewProperties()
+ p.Set("s", "t")
+ p.Set("i", "9")
+ p.MustFlag(f)
+
+ if want := "t"; *gotS != want {
+ t.Errorf("Got string s=%q, want %q", *gotS, want)
+ }
+ if want := 9; *gotI != want {
+ t.Errorf("Got int i=%d, want %d", *gotI, want)
+ }
+}
+
+// TestFlagOverride verifies Properties.MustFlag with flag.FlagSet.Parse.
+func TestFlagOverride(t *testing.T) {
+	f := flag.NewFlagSet("src", flag.PanicOnError)
+	gotA := f.Int("a", 1, "remain default")
+	gotB := f.Int("b", 2, "customized")
+	gotC := f.Int("c", 3, "overridden")
+
+	f.Parse([]string{"-c", "4"})
+
+	p := NewProperties()
+	p.Set("b", "5")
+	p.Set("c", "6")
+	p.MustFlag(f)
+
+	if want := 1; *gotA != want {
+		t.Errorf("Got remain default a=%d, want %d", *gotA, want)
+	}
+	if want := 5; *gotB != want {
+		t.Errorf("Got customized b=%d, want %d", *gotB, want)
+	}
+	if want := 4; *gotC != want {
+		t.Errorf("Got overridden c=%d, want %d", *gotC, want)
+	}
+}
+
+func ExampleProperties_MustFlag() {
+ x := flag.Int("x", 0, "demo customize")
+ y := flag.Int("y", 0, "demo override")
+
+ // Demo alternative for flag.Parse():
+ flag.CommandLine.Parse([]string{"-y", "10"})
+ fmt.Printf("flagged as x=%d, y=%d\n", *x, *y)
+
+ p := NewProperties()
+ p.Set("x", "7")
+ p.Set("y", "42") // note discard
+ p.MustFlag(flag.CommandLine)
+ fmt.Printf("configured to x=%d, y=%d\n", *x, *y)
+
+ // Output:
+ // flagged as x=0, y=10
+ // configured to x=7, y=10
+}
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
new file mode 100644
index 0000000000..014e63f0ef
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -0,0 +1,409 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the template/text/parser package
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+ typ itemType // The type of this item.
+ pos int // The starting position, in bytes, of this item in the input string.
+ val string // The value of this item.
+}
+
+func (i item) String() string {
+ switch {
+ case i.typ == itemEOF:
+ return "EOF"
+ case i.typ == itemError:
+ return i.val
+ case len(i.val) > 10:
+ return fmt.Sprintf("%.10q...", i.val)
+ }
+ return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+ itemError itemType = iota // error occurred; value is text of error
+ itemEOF
+ itemKey // a key
+ itemValue // a value
+ itemComment // a comment
+)
+
+// defines a constant for EOF
+const eof = -1
+
+// permitted whitespace characters space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+ input string // the string being scanned
+ state stateFn // the next lexing function to enter
+ pos int // current position in the input
+ start int // start position of this item
+ width int // width of last rune read from input
+ lastPos int // position of most recent item returned by nextItem
+ runes []rune // scanned runes for this item
+ items chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+ if int(l.pos) >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.width = w
+ l.pos += l.width
+ return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+ l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+ item := item{t, l.start, string(l.runes)}
+ l.items <- item
+ l.start = l.pos
+ l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+ l.start = l.pos
+}
+
+// appends the rune to the current value
+func (l *lexer) appendRune(r rune) {
+ l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+ if strings.IndexRune(valid, l.next()) >= 0 {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+ for strings.IndexRune(valid, l.next()) >= 0 {
+ }
+ l.backup()
+}
+
+// acceptRunUntil consumes a run of runes up to a terminator.
+func (l *lexer) acceptRunUntil(term rune) {
+ for term != l.next() {
+ }
+ l.backup()
+}
+
+// isNotEmpty reports whether the current parsed text is not empty.
+func (l *lexer) isNotEmpty() bool {
+ return l.pos > l.start
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+ return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+ l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+ return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+ item := <-l.items
+ l.lastPos = item.pos
+ return item
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+ l := &lexer{
+ input: input,
+ items: make(chan item),
+ runes: make([]rune, 0, 32),
+ }
+ go l.run()
+ return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+ for l.state = lexBeforeKey(l); l.state != nil; {
+ l.state = l.state(l)
+ }
+}
+
+// state functions
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.emit(itemEOF)
+ return nil
+
+ case isEOL(r):
+ l.ignore()
+ return lexBeforeKey
+
+ case isComment(r):
+ return lexComment
+
+ case isWhitespace(r):
+ l.acceptRun(whitespace)
+ l.ignore()
+ return lexKey
+
+ default:
+ l.backup()
+ return lexKey
+ }
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.ignore()
+ for {
+ switch r := l.next(); {
+ case isEOF(r):
+ l.ignore()
+ l.emit(itemEOF)
+ return nil
+ case isEOL(r):
+ l.emit(itemComment)
+ return lexBeforeKey
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+ var r rune
+
+Loop:
+ for {
+ switch r = l.next(); {
+
+ case isEscape(r):
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+
+ case isEndOfKey(r):
+ l.backup()
+ break Loop
+
+ case isEOF(r):
+ break Loop
+
+ default:
+ l.appendRune(r)
+ }
+ }
+
+ if len(l.runes) > 0 {
+ l.emit(itemKey)
+ }
+
+ if isEOF(r) {
+ l.emit(itemEOF)
+ return nil
+ }
+
+ return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+ l.acceptRun(whitespace)
+ l.accept(":=")
+ l.acceptRun(whitespace)
+ l.ignore()
+ return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+ for {
+ switch r := l.next(); {
+ case isEscape(r):
+ r := l.peek()
+ if isEOL(r) {
+ l.next()
+ l.acceptRun(whitespace)
+ } else {
+ err := l.scanEscapeSequence()
+ if err != nil {
+ return l.errorf(err.Error())
+ }
+ }
+
+ case isEOL(r):
+ l.emit(itemValue)
+ l.ignore()
+ return lexBeforeKey
+
+ case isEOF(r):
+ l.emit(itemValue)
+ l.emit(itemEOF)
+ return nil
+
+ default:
+ l.appendRune(r)
+ }
+ }
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+ switch r := l.next(); {
+
+ case isEscapedCharacter(r):
+ l.appendRune(decodeEscapedCharacter(r))
+ return nil
+
+ case atUnicodeLiteral(r):
+ return l.scanUnicodeLiteral()
+
+ case isEOF(r):
+ return fmt.Errorf("premature EOF")
+
+ // silently drop the escape character and append the rune as is
+ default:
+ l.appendRune(r)
+ return nil
+ }
+}
+
+// scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+ // scan the digits
+ d := make([]rune, 4)
+ for i := 0; i < 4; i++ {
+ d[i] = l.next()
+ if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+ return fmt.Errorf("invalid unicode literal")
+ }
+ }
+
+ // decode the digits into a rune
+ r, err := strconv.ParseInt(string(d), 16, 0)
+ if err != nil {
+ return err
+ }
+
+ l.appendRune(rune(r))
+ return nil
+}
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+ switch r {
+ case 'f':
+ return '\f'
+ case 'n':
+ return '\n'
+ case 'r':
+ return '\r'
+ case 't':
+ return '\t'
+ default:
+ return r
+ }
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+ return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+ return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+ return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+ return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+ return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+ return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+ return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+ return strings.ContainsRune(whitespace, r)
+}
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
new file mode 100644
index 0000000000..3915c73976
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -0,0 +1,203 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+ // UTF8 interprets the input data as UTF-8.
+ UTF8 Encoding = 1 << iota
+
+ // ISO_8859_1 interprets the input data as ISO-8859-1.
+ ISO_8859_1
+)
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+ return loadBuf(buf, enc)
+}
+
+// LoadString reads an UTF8 string into a properties struct.
+func LoadString(s string) (*Properties, error) {
+ return loadBuf([]byte(s), UTF8)
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+	return loadFiles([]string{filename}, enc, false)
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ return loadFiles(filenames, enc, ignoreMissing)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, 'iso-8859-1' or 'latin1' the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func LoadURL(url string) (*Properties, error) {
+ return loadURLs([]string{url}, false)
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code will
+// not be reported as error. See LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+ return loadURLs(urls, ignoreMissing)
+}
+
+// MustLoadString reads an UTF8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+ return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+ return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+ return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+ return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+ return must(LoadURLs(urls, ignoreMissing))
+}
+
+func loadBuf(buf []byte, enc Encoding) (*Properties, error) {
+ p, err := parse(convert(buf, enc))
+ if err != nil {
+ return nil, err
+ }
+ return p, p.check()
+}
+
+func loadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+ var buf bytes.Buffer
+ for _, filename := range filenames {
+ f, err := expandFilename(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err := ioutil.ReadFile(f)
+ if err != nil {
+ if ignoreMissing && os.IsNotExist(err) {
+ LogPrintf("properties: %s not found. skipping", filename)
+ continue
+ }
+ return nil, err
+ }
+
+ // concatenate the buffers and add a new line in case
+ // the previous file didn't end with a new line
+ buf.Write(data)
+ buf.WriteRune('\n')
+ }
+ return loadBuf(buf.Bytes(), enc)
+}
+
+func loadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+ var buf bytes.Buffer
+ for _, u := range urls {
+ resp, err := http.Get(u)
+ if err != nil {
+ return nil, fmt.Errorf("properties: error fetching %q. %s", u, err)
+ }
+ if resp.StatusCode == 404 && ignoreMissing {
+ LogPrintf("properties: %s returned %d. skipping", u, resp.StatusCode)
+ continue
+ }
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("properties: %s returned %d", u, resp.StatusCode)
+ }
+ body, err := ioutil.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return nil, fmt.Errorf("properties: %s error reading response. %s", u, err)
+ }
+
+ ct := resp.Header.Get("Content-Type")
+ var enc Encoding
+ switch strings.ToLower(ct) {
+ case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
+ enc = ISO_8859_1
+ case "", "text/plain; charset=utf-8":
+ enc = UTF8
+ default:
+ return nil, fmt.Errorf("properties: invalid content type %s", ct)
+ }
+
+ buf.WriteString(convert(body, enc))
+ buf.WriteRune('\n')
+ }
+ return loadBuf(buf.Bytes(), UTF8)
+}
+
+func must(p *Properties, err error) *Properties {
+ if err != nil {
+ ErrorHandler(err)
+ }
+ return p
+}
+
+// expandFilename expands ${ENV_VAR} expressions in a filename.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as error.
+func expandFilename(filename string) (string, error) {
+ return expand(filename, make(map[string]bool), "${", "}", make(map[string]string))
+}
+
+// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
+// For ISO-8859-1 we can convert each byte straight into a rune since the
+// first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+ switch enc {
+ case UTF8:
+ return string(buf)
+ case ISO_8859_1:
+ runes := make([]rune, len(buf))
+ for i, b := range buf {
+ runes[i] = rune(b)
+ }
+ return string(runes)
+ default:
+ ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+ }
+ panic("ErrorHandler should exit")
+}
diff --git a/vendor/github.com/magiconair/properties/load_test.go b/vendor/github.com/magiconair/properties/load_test.go
new file mode 100644
index 0000000000..f95b948382
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load_test.go
@@ -0,0 +1,204 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+)
+
+type LoadSuite struct {
+ tempFiles []string
+}
+
+var _ = Suite(&LoadSuite{})
+
+func (s *LoadSuite) TestLoadFailsWithNotExistingFile(c *C) {
+ _, err := LoadFile("doesnotexist.properties", ISO_8859_1)
+ c.Assert(err, NotNil)
+ c.Assert(err, ErrorMatches, "open.*no such file or directory")
+}
+
+func (s *LoadSuite) TestLoadFilesFailsOnNotExistingFile(c *C) {
+ _, err := LoadFiles([]string{"doesnotexist.properties"}, ISO_8859_1, false)
+ c.Assert(err, NotNil)
+ c.Assert(err, ErrorMatches, "open.*no such file or directory")
+}
+
+func (s *LoadSuite) TestLoadFilesDoesNotFailOnNotExistingFileAndIgnoreMissing(c *C) {
+ p, err := LoadFiles([]string{"doesnotexist.properties"}, ISO_8859_1, true)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 0)
+}
+
+func (s *LoadSuite) TestLoadString(c *C) {
+ x := "key=äüö"
+ p1 := MustLoadString(x)
+ p2 := must(Load([]byte(x), UTF8))
+ c.Assert(p1, DeepEquals, p2)
+}
+
+func (s *LoadSuite) TestLoadFile(c *C) {
+ filename := s.makeFile(c, "key=value")
+ p := MustLoadFile(filename, ISO_8859_1)
+
+ c.Assert(p.Len(), Equals, 1)
+ assertKeyValues(c, "", p, "key", "value")
+}
+
+func (s *LoadSuite) TestLoadFiles(c *C) {
+ filename := s.makeFile(c, "key=value")
+ filename2 := s.makeFile(c, "key2=value2")
+ p := MustLoadFiles([]string{filename, filename2}, ISO_8859_1, false)
+ assertKeyValues(c, "", p, "key", "value", "key2", "value2")
+}
+
+func (s *LoadSuite) TestLoadExpandedFile(c *C) {
+ filename := s.makeFilePrefix(c, os.Getenv("USER"), "key=value")
+ filename = strings.Replace(filename, os.Getenv("USER"), "${USER}", -1)
+ p := MustLoadFile(filename, ISO_8859_1)
+ assertKeyValues(c, "", p, "key", "value")
+}
+
+func (s *LoadSuite) TestLoadFilesAndIgnoreMissing(c *C) {
+ filename := s.makeFile(c, "key=value")
+ filename2 := s.makeFile(c, "key2=value2")
+ p := MustLoadFiles([]string{filename, filename + "foo", filename2, filename2 + "foo"}, ISO_8859_1, true)
+ assertKeyValues(c, "", p, "key", "value", "key2", "value2")
+}
+
+func (s *LoadSuite) TestLoadURL(c *C) {
+ srv := testServer()
+ defer srv.Close()
+ p := MustLoadURL(srv.URL + "/a")
+ assertKeyValues(c, "", p, "key", "value")
+}
+
+func (s *LoadSuite) TestLoadURLs(c *C) {
+ srv := testServer()
+ defer srv.Close()
+ p := MustLoadURLs([]string{srv.URL + "/a", srv.URL + "/b"}, false)
+ assertKeyValues(c, "", p, "key", "value", "key2", "value2")
+}
+
+func (s *LoadSuite) TestLoadURLsAndFailMissing(c *C) {
+ srv := testServer()
+ defer srv.Close()
+ p, err := LoadURLs([]string{srv.URL + "/a", srv.URL + "/c"}, false)
+ c.Assert(p, IsNil)
+ c.Assert(err, ErrorMatches, ".*returned 404.*")
+}
+
+func (s *LoadSuite) TestLoadURLsAndIgnoreMissing(c *C) {
+ srv := testServer()
+ defer srv.Close()
+ p := MustLoadURLs([]string{srv.URL + "/a", srv.URL + "/b", srv.URL + "/c"}, true)
+ assertKeyValues(c, "", p, "key", "value", "key2", "value2")
+}
+
+func (s *LoadSuite) TestLoadURLEncoding(c *C) {
+ srv := testServer()
+ defer srv.Close()
+
+ uris := []string{"/none", "/utf8", "/plain", "/latin1", "/iso88591"}
+ for i, uri := range uris {
+ p := MustLoadURL(srv.URL + uri)
+ c.Assert(p.GetString("key", ""), Equals, "äöü", Commentf("%d", i))
+ }
+}
+
+func (s *LoadSuite) TestLoadURLFailInvalidEncoding(c *C) {
+ srv := testServer()
+ defer srv.Close()
+
+ p, err := LoadURL(srv.URL + "/json")
+ c.Assert(p, IsNil)
+ c.Assert(err, ErrorMatches, ".*invalid content type.*")
+}
+
+func (s *LoadSuite) SetUpSuite(c *C) {
+ s.tempFiles = make([]string, 0)
+}
+
+func (s *LoadSuite) TearDownSuite(c *C) {
+ for _, path := range s.tempFiles {
+ err := os.Remove(path)
+ if err != nil {
+ fmt.Printf("os.Remove: %v", err)
+ }
+ }
+}
+
+func (s *LoadSuite) makeFile(c *C, data string) string {
+ return s.makeFilePrefix(c, "properties", data)
+}
+
+func (s *LoadSuite) makeFilePrefix(c *C, prefix, data string) string {
+ f, err := ioutil.TempFile("", prefix)
+ if err != nil {
+ fmt.Printf("ioutil.TempFile: %v", err)
+ c.FailNow()
+ }
+
+ // remember the temp file so that we can remove it later
+ s.tempFiles = append(s.tempFiles, f.Name())
+
+ n, err := fmt.Fprint(f, data)
+ if err != nil {
+ fmt.Printf("fmt.Fprintln: %v", err)
+ c.FailNow()
+ }
+ if n != len(data) {
+ fmt.Printf("Data size mismatch. expected=%d wrote=%d\n", len(data), n)
+ c.FailNow()
+ }
+
+ err = f.Close()
+ if err != nil {
+ fmt.Printf("f.Close: %v", err)
+ c.FailNow()
+ }
+
+ return f.Name()
+}
+
+func testServer() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ send := func(data []byte, contentType string) {
+ w.Header().Set("Content-Type", contentType)
+ w.Write(data)
+ }
+
+ utf8 := []byte("key=äöü")
+ iso88591 := []byte{0x6b, 0x65, 0x79, 0x3d, 0xe4, 0xf6, 0xfc} // key=äöü
+
+ switch r.RequestURI {
+ case "/a":
+ send([]byte("key=value"), "")
+ case "/b":
+ send([]byte("key2=value2"), "")
+ case "/none":
+ send(utf8, "")
+ case "/utf8":
+ send(utf8, "text/plain; charset=utf-8")
+ case "/json":
+ send(utf8, "application/json; charset=utf-8")
+ case "/plain":
+ send(iso88591, "text/plain")
+ case "/latin1":
+ send(iso88591, "text/plain; charset=latin1")
+ case "/iso88591":
+ send(iso88591, "text/plain; charset=iso-8859-1")
+ default:
+ w.WriteHeader(404)
+ }
+ }))
+}
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
new file mode 100644
index 0000000000..ff0e1e1578
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// parser turns the item stream produced by the lexer into a Properties
+// object. It is used only by parse().
+type parser struct {
+	lex *lexer
+}
+
+// parse lexes input and builds a Properties object from the resulting
+// comment/key/value items. Comments seen before a key are attached to that
+// key; a key that appears at EOF without a value is stored with an empty
+// value. Errors raised via parser.errorf are panics that p.recover converts
+// into the returned err.
+func parse(input string) (properties *Properties, err error) {
+	p := &parser{lex: lex(input)}
+	defer p.recover(&err)
+
+	properties = NewProperties()
+	key := ""
+	comments := []string{}
+
+	for {
+		token := p.expectOneOf(itemComment, itemKey, itemEOF)
+		switch token.typ {
+		case itemEOF:
+			goto done
+		case itemComment:
+			comments = append(comments, token.val)
+			continue
+		case itemKey:
+			key = token.val
+			// record first-seen order; re-assigned keys keep their position
+			if _, ok := properties.m[key]; !ok {
+				properties.k = append(properties.k, key)
+			}
+		}
+
+		// a key must be followed by a value or by EOF (empty value)
+		token = p.expectOneOf(itemValue, itemEOF)
+		if len(comments) > 0 {
+			properties.c[key] = comments
+			comments = []string{}
+		}
+		switch token.typ {
+		case itemEOF:
+			properties.m[key] = ""
+			goto done
+		case itemValue:
+			properties.m[key] = token.val
+		}
+	}
+
+done:
+	return properties, nil
+}
+
+// errorf aborts the parse by panicking with an error that carries the
+// current input line number. p.recover turns the panic into an error return.
+func (p *parser) errorf(format string, args ...interface{}) {
+	msg := fmt.Sprintf(format, args...)
+	panic(fmt.Errorf("properties: Line %d: %s", p.lex.lineNumber(), msg))
+}
+
+// expect consumes the next item and aborts the parse unless it has the
+// expected type. The consumed item is returned.
+// NOTE(review): currently unused; parse only calls expectOneOf.
+func (p *parser) expect(expected itemType) item {
+	token := p.lex.nextItem()
+	if token.typ != expected {
+		p.unexpected(token)
+	}
+	return token
+}
+
+// expectOneOf consumes the next item and returns it if its type matches one
+// of expected. Otherwise the parse is aborted via unexpected, which panics.
+func (p *parser) expectOneOf(expected ...itemType) item {
+	token := p.lex.nextItem()
+	for _, typ := range expected {
+		if typ == token.typ {
+			return token
+		}
+	}
+	p.unexpected(token)
+	panic("unexpected token") // unreachable: unexpected never returns
+}
+
+// unexpected aborts the parse with an error describing the offending token.
+// The token's string form is passed as a formatting argument, not as the
+// format string, so '%' characters in the input cannot be misread as verbs.
+func (p *parser) unexpected(token item) {
+	p.errorf("%s", token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level
+// of Parse. Runtime errors (genuine bugs) are re-panicked; anything else is
+// assumed to be an error raised by errorf and is stored in *errp.
+func (p *parser) recover(errp *error) {
+	if e := recover(); e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		*errp = e.(error)
+	}
+}
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
new file mode 100644
index 0000000000..884ef4e079
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -0,0 +1,750 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used.
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the type of function which handles log output.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf is the function used for diagnostic log output.
+// The default is log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+	log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+	panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+	// Pre-/Postfix for property expansion, e.g. "${" and "}".
+	Prefix string
+	Postfix string
+
+	// DisableExpansion controls the expansion of properties on Get()
+	// and the check for circular references on Set(). When set to
+	// true Properties behaves like a simple key/value store and does
+	// not check for circular references on Get() or on Set().
+	DisableExpansion bool
+
+	// m stores the key/value pairs (values unexpanded).
+	m map[string]string
+
+	// c stores the comments that appeared before each key.
+	c map[string][]string
+
+	// k stores the keys in order of appearance in the input.
+	k []string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+	p := &Properties{
+		Prefix:  "${",
+		Postfix: "}",
+	}
+	p.m = make(map[string]string)
+	p.c = make(map[string][]string)
+	p.k = make([]string, 0)
+	return p
+}
+
+// Get returns the expanded value for the given key if exists.
+// Otherwise, ok is false.
+//
+// When DisableExpansion is set the raw stored value is returned verbatim
+// and no expansion or validation takes place.
+func (p *Properties) Get(key string) (value string, ok bool) {
+	v, ok := p.m[key]
+	if p.DisableExpansion {
+		return v, ok
+	}
+	if !ok {
+		return "", false
+	}
+
+	expanded, err := p.expand(v)
+
+	// we guarantee that the expanded value is free of
+	// circular references and malformed expressions
+	// so we panic if we still get an error here.
+	if err != nil {
+		ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
+	}
+
+	return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+	v, ok := p.Get(key)
+	if !ok {
+		ErrorHandler(invalidKeyError(key))
+		panic("ErrorHandler should exit")
+	}
+	return v
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+	p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+	comments, ok := p.c[key]
+	if !ok || len(comments) == 0 {
+		return ""
+	}
+	return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+	if comments, ok := p.c[key]; ok {
+		return comments
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key, replacing any existing comments.
+func (p *Properties) SetComment(key, comment string) {
+	p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+// NOTE(review): the slice is stored without copying, so later mutation by
+// the caller is visible here — confirm intended.
+func (p *Properties) SetComments(key string, comments []string) {
+	if comments == nil {
+		delete(p.c, key)
+		return
+	}
+	p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// getBool looks up key and converts it with boolVal. A missing key is
+// reported as an invalidKeyError; any non-truthy value maps to false.
+func (p *Properties) getBool(key string) (value bool, err error) {
+	if v, ok := p.Get(key); ok {
+		return boolVal(v), nil
+	}
+	return false, invalidKeyError(key)
+}
+
+// boolVal reports whether v spells a "true" value: one of
+// "1", "true", "yes" or "on", compared case-insensitively.
+func boolVal(v string) bool {
+	switch strings.ToLower(v) {
+	case "1", "true", "yes", "on":
+		return true
+	}
+	return false
+}
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as an time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as an time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		return def
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(invalidKeyError(key))
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// getFloat64 looks up key and parses it with strconv.ParseFloat (64 bit).
+// A missing key is reported as an invalidKeyError.
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseFloat(v, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// getInt64 looks up key and parses it with strconv.ParseInt (base 10, 64 bit).
+// A missing key is reported as an invalidKeyError.
+func (p *Properties) getInt64(key string) (value int64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as an uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// getUint64 looks up key and parses it with strconv.ParseUint (base 10, 64 bit).
+// A missing key is reported as an invalidKeyError.
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	return def
+}
+
+// MustGetString returns the expanded value for the given key if exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	// not reached when ErrorHandler exits or panics (the default behavior)
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern. An invalid pattern is returned
+// as a regexp compile error.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+// NOTE(review): Set's error return is ignored here; a copied value whose
+// reference is outside the filtered subset falls back to an environment
+// lookup on expansion — confirm intended.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if re.MatchString(k) {
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if strings.HasPrefix(k, prefix) {
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+// A key equal to the prefix itself would strip to "" and is skipped.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	n := len(prefix)
+	for _, k := range p.k {
+		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+			pp.Set(k[n:], p.m[k])
+		}
+	}
+	return pp
+}
+
+// Len returns the number of keys currently stored.
+func (p *Properties) Len() int {
+	return len(p.m)
+}
+
+// Keys returns a copy of all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+	keys := make([]string, len(p.k))
+	copy(keys, p.k)
+	return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+	if key == "" {
+		return "", false, nil
+	}
+
+	// if expansion is disabled we allow circular references
+	if p.DisableExpansion {
+		prev, ok = p.Get(key)
+		p.m[key] = value
+		return prev, ok, nil
+	}
+
+	// to check for a circular reference we temporarily need
+	// to set the new value. If there is an error then revert
+	// to the previous state. Only if all tests are successful
+	// then we add the key to the p.k list.
+	// NOTE(review): Get returns the *expanded* previous value, so a revert
+	// stores the expanded form rather than the original raw value — confirm
+	// intended.
+	prev, ok = p.Get(key)
+	p.m[key] = value
+
+	// now check for a circular reference
+	_, err = p.expand(value)
+	if err != nil {
+
+		// revert to the previous state
+		if ok {
+			p.m[key] = prev
+		} else {
+			delete(p.m, key)
+		}
+
+		return "", false, err
+	}
+
+	if !ok {
+		p.k = append(p.k, key)
+	}
+
+	return prev, ok, nil
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (string, bool) {
+	prev, ok, err := p.Set(key, value)
+	if err != nil {
+		// ErrorHandler must not return (default: log.Fatal)
+		ErrorHandler(err)
+	}
+	return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+ var s string
+ for _, key := range p.k {
+ value, _ := p.Get(key)
+ s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+ }
+ return s
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+// It is shorthand for WriteComment with an empty comment prefix.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+	return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+	var x int
+
+	for _, key := range p.k {
+		value := p.m[key]
+
+		if prefix != "" {
+			if comments, ok := p.c[key]; ok {
+				// don't print comments if they are all empty
+				allEmpty := true
+				for _, c := range comments {
+					if c != "" {
+						allEmpty = false
+						break
+					}
+				}
+
+				if !allEmpty {
+					// add a blank line between entries but not at the top
+					// (n > 0 means at least one entry was already written)
+					if len(comments) > 0 && n > 0 {
+						x, err = fmt.Fprintln(w)
+						if err != nil {
+							return
+						}
+						n += x
+					}
+
+					for _, c := range comments {
+						x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
+						if err != nil {
+							return
+						}
+						n += x
+					}
+				}
+			}
+		}
+
+		// keys additionally escape ' ', ':' and '=' so they parse back cleanly
+		x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
+		if err != nil {
+			return
+		}
+		n += x
+	}
+	return
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments. The remaining keys keep their
+// original order of appearance.
+func (p *Properties) Delete(key string) {
+	delete(p.m, key)
+	delete(p.c, key)
+	newKeys := []string{}
+	for _, k := range p.k {
+		if k != key {
+			// keep every surviving key (the original appended the deleted
+			// key here, corrupting the stored key order)
+			newKeys = append(newKeys, k)
+		}
+	}
+	p.k = newKeys
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+	for _, value := range p.m {
+		_, err := p.expand(value)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// expand expands all expressions in input using the configured
+// prefix/postfix and the receiver's own key/value map.
+func (p *Properties) expand(input string) (string, error) {
+	// no pre/postfix -> nothing to expand
+	if p.Prefix == "" && p.Postfix == "" {
+		return input, nil
+	}
+
+	return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their
+// corresponding values. The keys map tracks the keys on the *current*
+// expansion chain so that a genuine circular reference (a key whose
+// expansion eventually references itself) is reported as an error, while a
+// repeated, non-circular use of the same key later in the string is allowed.
+// (Previously the key stayed marked forever, so "${a} ${a}" was falsely
+// reported as a circular reference.) A key not found in values falls back
+// to an environment variable lookup.
+func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) {
+	start := strings.Index(s, prefix)
+	if start == -1 {
+		return s, nil
+	}
+
+	keyStart := start + len(prefix)
+	keyLen := strings.Index(s[keyStart:], postfix)
+	if keyLen == -1 {
+		return "", fmt.Errorf("malformed expression")
+	}
+
+	end := keyStart + keyLen + len(postfix) - 1
+	key := s[keyStart : keyStart+keyLen]
+
+	if keys[key] {
+		return "", fmt.Errorf("circular reference")
+	}
+
+	val, ok := values[key]
+	if !ok {
+		val = os.Getenv(key)
+	}
+
+	// expand the value with this key marked as "in progress" so that a
+	// self- or mutually-recursive definition is detected as circular
+	keys[key] = true
+	expanded, err := expand(val, keys, prefix, postfix, values)
+	if err != nil {
+		return "", err
+	}
+	delete(keys, key) // fully expanded; later occurrences are legal again
+
+	// expand the remainder of the string after the closing postfix
+	rest, err := expand(s[end+1:], keys, prefix, postfix, values)
+	if err != nil {
+		return "", err
+	}
+
+	return s[:start] + expanded + rest, nil
+}
+
+// encode encodes a UTF-8 string in the given target encoding and escapes
+// some characters. It panics on an unsupported encoding.
+func encode(s string, special string, enc Encoding) string {
+	switch enc {
+	case UTF8:
+		return encodeUtf8(s, special)
+	case ISO_8859_1:
+		return encodeIso(s, special)
+	}
+	panic(fmt.Sprintf("unsupported encoding %v", enc))
+}
+
+// encodeUtf8 escapes special characters in s while keeping UTF-8 encoding.
+func encodeUtf8(s string, special string) string {
+	v := ""
+	for _, r := range s {
+		v += escape(r, special)
+	}
+	return v
+}
+
+func encodeIso(s string, special string) string {
+ var r rune
+ var w int
+ var v string
+ for pos := 0; pos < len(s); {
+ switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+ case r < 1<<8: // single byte rune -> escape special chars only
+ v += escape(r, special)
+ case r < 1<<16: // two byte rune -> unicode literal
+ v += fmt.Sprintf("\\u%04x", r)
+ default: // more than two bytes per rune -> can't encode
+ v += "?"
+ }
+ pos += w
+ }
+ return v
+}
+
+// escape returns the escaped form of r: whitespace control characters get
+// their backslash mnemonics, runes listed in special get a leading backslash
+// and everything else is returned unchanged.
+func escape(r rune, special string) string {
+	switch r {
+	case '\f':
+		return `\f`
+	case '\n':
+		return `\n`
+	case '\r':
+		return `\r`
+	case '\t':
+		return `\t`
+	}
+	if strings.ContainsRune(special, r) {
+		return `\` + string(r)
+	}
+	return string(r)
+}
+
+// invalidKeyError returns the error reported when a requested key does not
+// exist in the properties.
+func invalidKeyError(key string) error {
+	return fmt.Errorf("unknown property: %s", key)
+}
diff --git a/vendor/github.com/magiconair/properties/properties_test.go b/vendor/github.com/magiconair/properties/properties_test.go
new file mode 100644
index 0000000000..c0af16e7e9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties_test.go
@@ -0,0 +1,906 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "math"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/magiconair/properties/_third_party/gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type TestSuite struct {
+ prevHandler ErrorHandlerFunc
+}
+
+var (
+ _ = Suite(&TestSuite{})
+ verbose = flag.Bool("verbose", false, "Verbose output")
+)
+
+// --------------------------------------------------------------------
+
+func (s *TestSuite) SetUpSuite(c *C) {
+ s.prevHandler = ErrorHandler
+ ErrorHandler = PanicHandler
+}
+
+// --------------------------------------------------------------------
+
+func (s *TestSuite) TearDownSuite(c *C) {
+ ErrorHandler = s.prevHandler
+}
+
+// ----------------------------------------------------------------------------
+
+// define test cases in the form of
+// {"input", "key1", "value1", "key2", "value2", ...}
+var complexTests = [][]string{
+ // whitespace prefix
+ {" key=value", "key", "value"}, // SPACE prefix
+ {"\fkey=value", "key", "value"}, // FF prefix
+ {"\tkey=value", "key", "value"}, // TAB prefix
+ {" \f\tkey=value", "key", "value"}, // mix prefix
+
+ // multiple keys
+ {"key1=value1\nkey2=value2\n", "key1", "value1", "key2", "value2"},
+ {"key1=value1\rkey2=value2\r", "key1", "value1", "key2", "value2"},
+ {"key1=value1\r\nkey2=value2\r\n", "key1", "value1", "key2", "value2"},
+
+ // blank lines
+ {"\nkey=value\n", "key", "value"},
+ {"\rkey=value\r", "key", "value"},
+ {"\r\nkey=value\r\n", "key", "value"},
+
+ // escaped chars in key
+ {"k\\ ey = value", "k ey", "value"},
+ {"k\\:ey = value", "k:ey", "value"},
+ {"k\\=ey = value", "k=ey", "value"},
+ {"k\\fey = value", "k\fey", "value"},
+ {"k\\ney = value", "k\ney", "value"},
+ {"k\\rey = value", "k\rey", "value"},
+ {"k\\tey = value", "k\tey", "value"},
+
+ // escaped chars in value
+ {"key = v\\ alue", "key", "v alue"},
+ {"key = v\\:alue", "key", "v:alue"},
+ {"key = v\\=alue", "key", "v=alue"},
+ {"key = v\\falue", "key", "v\falue"},
+ {"key = v\\nalue", "key", "v\nalue"},
+ {"key = v\\ralue", "key", "v\ralue"},
+ {"key = v\\talue", "key", "v\talue"},
+
+ // silently dropped escape character
+ {"k\\zey = value", "kzey", "value"},
+ {"key = v\\zalue", "key", "vzalue"},
+
+ // unicode literals
+ {"key\\u2318 = value", "key⌘", "value"},
+ {"k\\u2318ey = value", "k⌘ey", "value"},
+ {"key = value\\u2318", "key", "value⌘"},
+ {"key = valu\\u2318e", "key", "valu⌘e"},
+
+ // multiline values
+ {"key = valueA,\\\n valueB", "key", "valueA,valueB"}, // SPACE indent
+ {"key = valueA,\\\n\f\f\fvalueB", "key", "valueA,valueB"}, // FF indent
+ {"key = valueA,\\\n\t\t\tvalueB", "key", "valueA,valueB"}, // TAB indent
+ {"key = valueA,\\\n \f\tvalueB", "key", "valueA,valueB"}, // mix indent
+
+ // comments
+ {"# this is a comment\n! and so is this\nkey1=value1\nkey#2=value#2\n\nkey!3=value!3\n# and another one\n! and the final one", "key1", "value1", "key#2", "value#2", "key!3", "value!3"},
+
+ // expansion tests
+ {"key=value\nkey2=${key}", "key", "value", "key2", "value"},
+ {"key=value\nkey2=aa${key}", "key", "value", "key2", "aavalue"},
+ {"key=value\nkey2=${key}bb", "key", "value", "key2", "valuebb"},
+ {"key=value\nkey2=aa${key}bb", "key", "value", "key2", "aavaluebb"},
+ {"key=value\nkey2=${key}\nkey3=${key2}", "key", "value", "key2", "value", "key3", "value"},
+ {"key=${USER}", "key", os.Getenv("USER")},
+ {"key=${USER}\nUSER=value", "key", "value", "USER", "value"},
+}
+
+// ----------------------------------------------------------------------------
+
+var commentTests = []struct {
+ input, key, value string
+ comments []string
+}{
+ {"key=value", "key", "value", nil},
+ {"#\nkey=value", "key", "value", []string{""}},
+ {"#comment\nkey=value", "key", "value", []string{"comment"}},
+ {"# comment\nkey=value", "key", "value", []string{"comment"}},
+ {"# comment\nkey=value", "key", "value", []string{"comment"}},
+ {"# comment\n\nkey=value", "key", "value", []string{"comment"}},
+ {"# comment1\n# comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}},
+ {"# comment1\n\n# comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}},
+ {"!comment\nkey=value", "key", "value", []string{"comment"}},
+ {"! comment\nkey=value", "key", "value", []string{"comment"}},
+ {"! comment\nkey=value", "key", "value", []string{"comment"}},
+ {"! comment\n\nkey=value", "key", "value", []string{"comment"}},
+ {"! comment1\n! comment2\nkey=value", "key", "value", []string{"comment1", "comment2"}},
+ {"! comment1\n\n! comment2\n\nkey=value", "key", "value", []string{"comment1", "comment2"}},
+}
+
+// ----------------------------------------------------------------------------
+
+var errorTests = []struct {
+ input, msg string
+}{
+ // unicode literals
+ {"key\\u1 = value", "invalid unicode literal"},
+ {"key\\u12 = value", "invalid unicode literal"},
+ {"key\\u123 = value", "invalid unicode literal"},
+ {"key\\u123g = value", "invalid unicode literal"},
+ {"key\\u123", "invalid unicode literal"},
+
+ // circular references
+ {"key=${key}", "circular reference"},
+ {"key1=${key2}\nkey2=${key1}", "circular reference"},
+
+ // malformed expressions
+ {"key=${ke", "malformed expression"},
+ {"key=valu${ke", "malformed expression"},
+}
+
+// ----------------------------------------------------------------------------
+
+var writeTests = []struct {
+ input, output, encoding string
+}{
+ // ISO-8859-1 tests
+ {"key = value", "key = value\n", "ISO-8859-1"},
+ {"key = value \\\n continued", "key = value continued\n", "ISO-8859-1"},
+ {"key⌘ = value", "key\\u2318 = value\n", "ISO-8859-1"},
+ {"ke\\ \\:y = value", "ke\\ \\:y = value\n", "ISO-8859-1"},
+
+ // UTF-8 tests
+ {"key = value", "key = value\n", "UTF-8"},
+ {"key = value \\\n continued", "key = value continued\n", "UTF-8"},
+ {"key⌘ = value⌘", "key⌘ = value⌘\n", "UTF-8"},
+ {"ke\\ \\:y = value", "ke\\ \\:y = value\n", "UTF-8"},
+}
+
+// ----------------------------------------------------------------------------
+
+var writeCommentTests = []struct {
+ input, output, encoding string
+}{
+ // ISO-8859-1 tests
+ {"key = value", "key = value\n", "ISO-8859-1"},
+ {"#\nkey = value", "key = value\n", "ISO-8859-1"},
+ {"#\n#\n#\nkey = value", "key = value\n", "ISO-8859-1"},
+ {"# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
+ {"\n# comment\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
+ {"# comment\n\nkey = value", "# comment\nkey = value\n", "ISO-8859-1"},
+ {"# comment1\n# comment2\nkey = value", "# comment1\n# comment2\nkey = value\n", "ISO-8859-1"},
+ {"#comment1\nkey1 = value1\n#comment2\nkey2 = value2", "# comment1\nkey1 = value1\n\n# comment2\nkey2 = value2\n", "ISO-8859-1"},
+
+ // UTF-8 tests
+ {"key = value", "key = value\n", "UTF-8"},
+ {"# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
+ {"\n# comment⌘\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
+ {"# comment⌘\n\nkey = value⌘", "# comment⌘\nkey = value⌘\n", "UTF-8"},
+ {"# comment1⌘\n# comment2⌘\nkey = value⌘", "# comment1⌘\n# comment2⌘\nkey = value⌘\n", "UTF-8"},
+ {"#comment1⌘\nkey1 = value1⌘\n#comment2⌘\nkey2 = value2⌘", "# comment1⌘\nkey1 = value1⌘\n\n# comment2⌘\nkey2 = value2⌘\n", "UTF-8"},
+}
+
+// ----------------------------------------------------------------------------
+
+var boolTests = []struct {
+ input, key string
+ def, value bool
+}{
+ // valid values for TRUE
+ {"key = 1", "key", false, true},
+ {"key = on", "key", false, true},
+ {"key = On", "key", false, true},
+ {"key = ON", "key", false, true},
+ {"key = true", "key", false, true},
+ {"key = True", "key", false, true},
+ {"key = TRUE", "key", false, true},
+ {"key = yes", "key", false, true},
+ {"key = Yes", "key", false, true},
+ {"key = YES", "key", false, true},
+
+ // valid values for FALSE (all other)
+ {"key = 0", "key", true, false},
+ {"key = off", "key", true, false},
+ {"key = false", "key", true, false},
+ {"key = no", "key", true, false},
+
+ // non existent key
+ {"key = true", "key2", false, false},
+}
+
+// ----------------------------------------------------------------------------
+
+var durationTests = []struct {
+ input, key string
+ def, value time.Duration
+}{
+ // valid values
+ {"key = 1", "key", 999, 1},
+ {"key = 0", "key", 999, 0},
+ {"key = -1", "key", 999, -1},
+ {"key = 0123", "key", 999, 123},
+
+ // invalid values
+ {"key = 0xff", "key", 999, 999},
+ {"key = 1.0", "key", 999, 999},
+ {"key = a", "key", 999, 999},
+
+ // non existent key
+ {"key = 1", "key2", 999, 999},
+}
+
+// ----------------------------------------------------------------------------
+
+var parsedDurationTests = []struct {
+ input, key string
+ def, value time.Duration
+}{
+ // valid values
+ {"key = -1ns", "key", 999, -1 * time.Nanosecond},
+ {"key = 300ms", "key", 999, 300 * time.Millisecond},
+ {"key = 5s", "key", 999, 5 * time.Second},
+ {"key = 3h", "key", 999, 3 * time.Hour},
+ {"key = 2h45m", "key", 999, 2*time.Hour + 45*time.Minute},
+
+ // invalid values
+ {"key = 0xff", "key", 999, 999},
+ {"key = 1.0", "key", 999, 999},
+ {"key = a", "key", 999, 999},
+ {"key = 1", "key", 999, 999},
+ {"key = 0", "key", 999, 0},
+
+ // non existent key
+ {"key = 1", "key2", 999, 999},
+}
+
+// ----------------------------------------------------------------------------
+
+var floatTests = []struct {
+ input, key string
+ def, value float64
+}{
+ // valid values
+ {"key = 1.0", "key", 999, 1.0},
+ {"key = 0.0", "key", 999, 0.0},
+ {"key = -1.0", "key", 999, -1.0},
+ {"key = 1", "key", 999, 1},
+ {"key = 0", "key", 999, 0},
+ {"key = -1", "key", 999, -1},
+ {"key = 0123", "key", 999, 123},
+
+ // invalid values
+ {"key = 0xff", "key", 999, 999},
+ {"key = a", "key", 999, 999},
+
+ // non existent key
+ {"key = 1", "key2", 999, 999},
+}
+
+// ----------------------------------------------------------------------------
+
+var int64Tests = []struct {
+ input, key string
+ def, value int64
+}{
+ // valid values
+ {"key = 1", "key", 999, 1},
+ {"key = 0", "key", 999, 0},
+ {"key = -1", "key", 999, -1},
+ {"key = 0123", "key", 999, 123},
+
+ // invalid values
+ {"key = 0xff", "key", 999, 999},
+ {"key = 1.0", "key", 999, 999},
+ {"key = a", "key", 999, 999},
+
+ // non existent key
+ {"key = 1", "key2", 999, 999},
+}
+
+// ----------------------------------------------------------------------------
+
+var uint64Tests = []struct {
+ input, key string
+ def, value uint64
+}{
+ // valid values
+ {"key = 1", "key", 999, 1},
+ {"key = 0", "key", 999, 0},
+ {"key = 0123", "key", 999, 123},
+
+ // invalid values
+ {"key = -1", "key", 999, 999},
+ {"key = 0xff", "key", 999, 999},
+ {"key = 1.0", "key", 999, 999},
+ {"key = a", "key", 999, 999},
+
+ // non existent key
+ {"key = 1", "key2", 999, 999},
+}
+
+// ----------------------------------------------------------------------------
+
+var stringTests = []struct {
+ input, key string
+ def, value string
+}{
+ // valid values
+ {"key = abc", "key", "def", "abc"},
+
+ // non existent key
+ {"key = abc", "key2", "def", "def"},
+}
+
+// ----------------------------------------------------------------------------
+
+var keysTests = []struct {
+ input string
+ keys []string
+}{
+ {"", []string{}},
+ {"key = abc", []string{"key"}},
+ {"key = abc\nkey2=def", []string{"key", "key2"}},
+ {"key2 = abc\nkey=def", []string{"key2", "key"}},
+ {"key = abc\nkey=def", []string{"key"}},
+}
+
+// ----------------------------------------------------------------------------
+
+var filterTests = []struct {
+ input string
+ pattern string
+ keys []string
+ err string
+}{
+ {"", "", []string{}, ""},
+ {"", "abc", []string{}, ""},
+ {"key=value", "", []string{"key"}, ""},
+ {"key=value", "key=", []string{}, ""},
+ {"key=value\nfoo=bar", "", []string{"foo", "key"}, ""},
+ {"key=value\nfoo=bar", "f", []string{"foo"}, ""},
+ {"key=value\nfoo=bar", "fo", []string{"foo"}, ""},
+ {"key=value\nfoo=bar", "foo", []string{"foo"}, ""},
+ {"key=value\nfoo=bar", "fooo", []string{}, ""},
+ {"key=value\nkey2=value2\nfoo=bar", "ey", []string{"key", "key2"}, ""},
+ {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}, ""},
+ {"key=value\nkey2=value2\nfoo=bar", "^key", []string{"key", "key2"}, ""},
+ {"key=value\nkey2=value2\nfoo=bar", "^(key|foo)", []string{"foo", "key", "key2"}, ""},
+ {"key=value\nkey2=value2\nfoo=bar", "[ abc", nil, "error parsing regexp.*"},
+}
+
+// ----------------------------------------------------------------------------
+
+var filterPrefixTests = []struct {
+ input string
+ prefix string
+ keys []string
+}{
+ {"", "", []string{}},
+ {"", "abc", []string{}},
+ {"key=value", "", []string{"key"}},
+ {"key=value", "key=", []string{}},
+ {"key=value\nfoo=bar", "", []string{"foo", "key"}},
+ {"key=value\nfoo=bar", "f", []string{"foo"}},
+ {"key=value\nfoo=bar", "fo", []string{"foo"}},
+ {"key=value\nfoo=bar", "foo", []string{"foo"}},
+ {"key=value\nfoo=bar", "fooo", []string{}},
+ {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}},
+}
+
+// ----------------------------------------------------------------------------
+
+var filterStripPrefixTests = []struct {
+ input string
+ prefix string
+ keys []string
+}{
+ {"", "", []string{}},
+ {"", "abc", []string{}},
+ {"key=value", "", []string{"key"}},
+ {"key=value", "key=", []string{}},
+ {"key=value\nfoo=bar", "", []string{"foo", "key"}},
+ {"key=value\nfoo=bar", "f", []string{"foo"}},
+ {"key=value\nfoo=bar", "fo", []string{"foo"}},
+ {"key=value\nfoo=bar", "foo", []string{"foo"}},
+ {"key=value\nfoo=bar", "fooo", []string{}},
+ {"key=value\nkey2=value2\nfoo=bar", "key", []string{"key", "key2"}},
+}
+
+// ----------------------------------------------------------------------------
+
+var setTests = []struct {
+ input string
+ key, value string
+ prev string
+ ok bool
+ err string
+ keys []string
+}{
+ {"", "", "", "", false, "", []string{}},
+ {"", "key", "value", "", false, "", []string{"key"}},
+ {"key=value", "key2", "value2", "", false, "", []string{"key", "key2"}},
+ {"key=value", "abc", "value3", "", false, "", []string{"key", "abc"}},
+ {"key=value", "key", "value3", "value", true, "", []string{"key"}},
+}
+
+// ----------------------------------------------------------------------------
+
+// TestBasic tests basic single key/value combinations with all possible
+// whitespace, delimiter and newline permutations.
+func (s *TestSuite) TestBasic(c *C) {
+ testWhitespaceAndDelimiterCombinations(c, "key", "")
+ testWhitespaceAndDelimiterCombinations(c, "key", "value")
+ testWhitespaceAndDelimiterCombinations(c, "key", "value ")
+}
+
+func (s *TestSuite) TestComplex(c *C) {
+ for _, test := range complexTests {
+ testKeyValue(c, test[0], test[1:]...)
+ }
+}
+
+func (s *TestSuite) TestErrors(c *C) {
+ for _, test := range errorTests {
+ _, err := Load([]byte(test.input), ISO_8859_1)
+ c.Assert(err, NotNil)
+ c.Assert(strings.Contains(err.Error(), test.msg), Equals, true, Commentf("Expected %q got %q", test.msg, err.Error()))
+ }
+}
+
+func (s *TestSuite) TestDisableExpansion(c *C) {
+	input := "key=value\nkey2=${key}"
+	p, err := parse(input)
+	// check the parse error before touching p to avoid a nil-pointer panic
+	c.Assert(err, IsNil)
+	p.DisableExpansion = true
+	c.Assert(p.MustGet("key"), Equals, "value")
+	c.Assert(p.MustGet("key2"), Equals, "${key}")
+
+	// with expansion disabled we can introduce circular references
+	p.Set("keyA", "${keyB}")
+	p.Set("keyB", "${keyA}")
+	c.Assert(p.MustGet("keyA"), Equals, "${keyB}")
+	c.Assert(p.MustGet("keyB"), Equals, "${keyA}")
+}
+
+func (s *TestSuite) TestMustGet(c *C) {
+ input := "key = value\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGet("key"), Equals, "value")
+ c.Assert(func() { p.MustGet("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetBool(c *C) {
+ for _, test := range boolTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetBool(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetBool(c *C) {
+ input := "key = true\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetBool("key"), Equals, true)
+ c.Assert(func() { p.MustGetBool("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetDuration(c *C) {
+ for _, test := range durationTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetDuration(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetDuration(c *C) {
+ input := "key = 123\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetDuration("key"), Equals, time.Duration(123))
+ c.Assert(func() { p.MustGetDuration("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
+ c.Assert(func() { p.MustGetDuration("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetParsedDuration(c *C) {
+ for _, test := range parsedDurationTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetParsedDuration(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetParsedDuration(c *C) {
+ input := "key = 123ms\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetParsedDuration("key"), Equals, 123*time.Millisecond)
+ c.Assert(func() { p.MustGetParsedDuration("key2") }, PanicMatches, "time: invalid duration ghi")
+ c.Assert(func() { p.MustGetParsedDuration("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetFloat64(c *C) {
+ for _, test := range floatTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetFloat64(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetFloat64(c *C) {
+ input := "key = 123\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetFloat64("key"), Equals, float64(123))
+ c.Assert(func() { p.MustGetFloat64("key2") }, PanicMatches, "strconv.ParseFloat: parsing.*")
+ c.Assert(func() { p.MustGetFloat64("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetInt(c *C) {
+ for _, test := range int64Tests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetInt(test.key, int(test.def)), Equals, int(test.value))
+ }
+}
+
+func (s *TestSuite) TestMustGetInt(c *C) {
+ input := "key = 123\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetInt("key"), Equals, int(123))
+ c.Assert(func() { p.MustGetInt("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
+ c.Assert(func() { p.MustGetInt("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetInt64(c *C) {
+ for _, test := range int64Tests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetInt64(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetInt64(c *C) {
+ input := "key = 123\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetInt64("key"), Equals, int64(123))
+ c.Assert(func() { p.MustGetInt64("key2") }, PanicMatches, "strconv.ParseInt: parsing.*")
+ c.Assert(func() { p.MustGetInt64("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetUint(c *C) {
+ for _, test := range uint64Tests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetUint(test.key, uint(test.def)), Equals, uint(test.value))
+ }
+}
+
+func (s *TestSuite) TestMustGetUint(c *C) {
+	input := "key = 123\nkey2 = ghi"
+	p, err := parse(input)
+	c.Assert(err, IsNil)
+	c.Assert(p.MustGetUint("key"), Equals, uint(123))
+	// exercise MustGetUint's own panic paths (was copy-pasted as MustGetUint64)
+	c.Assert(func() { p.MustGetUint("key2") }, PanicMatches, "strconv.ParseUint: parsing.*")
+	c.Assert(func() { p.MustGetUint("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetUint64(c *C) {
+ for _, test := range uint64Tests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetUint64(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetUint64(c *C) {
+ input := "key = 123\nkey2 = ghi"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetUint64("key"), Equals, uint64(123))
+ c.Assert(func() { p.MustGetUint64("key2") }, PanicMatches, "strconv.ParseUint: parsing.*")
+ c.Assert(func() { p.MustGetUint64("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestGetString(c *C) {
+ for _, test := range stringTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, 1)
+ c.Assert(p.GetString(test.key, test.def), Equals, test.value)
+ }
+}
+
+func (s *TestSuite) TestMustGetString(c *C) {
+ input := `key = value`
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetString("key"), Equals, "value")
+ c.Assert(func() { p.MustGetString("invalid") }, PanicMatches, "unknown property: invalid")
+}
+
+func (s *TestSuite) TestComment(c *C) {
+ for _, test := range commentTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetString(test.key), Equals, test.value)
+ c.Assert(p.GetComments(test.key), DeepEquals, test.comments)
+ if test.comments != nil {
+ c.Assert(p.GetComment(test.key), Equals, test.comments[len(test.comments)-1])
+ } else {
+ c.Assert(p.GetComment(test.key), Equals, "")
+ }
+
+ // test setting comments
+ if len(test.comments) > 0 {
+ // set single comment
+ p.ClearComments()
+ c.Assert(len(p.c), Equals, 0)
+ p.SetComment(test.key, test.comments[0])
+ c.Assert(p.GetComment(test.key), Equals, test.comments[0])
+
+ // set multiple comments
+ p.ClearComments()
+ c.Assert(len(p.c), Equals, 0)
+ p.SetComments(test.key, test.comments)
+ c.Assert(p.GetComments(test.key), DeepEquals, test.comments)
+
+ // clear comments for a key
+ p.SetComments(test.key, nil)
+ c.Assert(p.GetComment(test.key), Equals, "")
+ c.Assert(p.GetComments(test.key), IsNil)
+ }
+ }
+}
+
+func (s *TestSuite) TestFilter(c *C) {
+ for _, test := range filterTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ pp, err := p.Filter(test.pattern)
+ if err != nil {
+ c.Assert(err, ErrorMatches, test.err)
+ continue
+ }
+ c.Assert(pp, NotNil)
+ c.Assert(pp.Len(), Equals, len(test.keys))
+ for _, key := range test.keys {
+ v1, ok1 := p.Get(key)
+ v2, ok2 := pp.Get(key)
+ c.Assert(ok1, Equals, true)
+ c.Assert(ok2, Equals, true)
+ c.Assert(v1, Equals, v2)
+ }
+ }
+}
+
+func (s *TestSuite) TestFilterPrefix(c *C) {
+ for _, test := range filterPrefixTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ pp := p.FilterPrefix(test.prefix)
+ c.Assert(pp, NotNil)
+ c.Assert(pp.Len(), Equals, len(test.keys))
+ for _, key := range test.keys {
+ v1, ok1 := p.Get(key)
+ v2, ok2 := pp.Get(key)
+ c.Assert(ok1, Equals, true)
+ c.Assert(ok2, Equals, true)
+ c.Assert(v1, Equals, v2)
+ }
+ }
+}
+
+func (s *TestSuite) TestKeys(c *C) {
+ for _, test := range keysTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ c.Assert(p.Len(), Equals, len(test.keys))
+ c.Assert(len(p.Keys()), Equals, len(test.keys))
+ c.Assert(p.Keys(), DeepEquals, test.keys)
+ }
+}
+
+func (s *TestSuite) TestSet(c *C) {
+ for _, test := range setTests {
+ p, err := parse(test.input)
+ c.Assert(err, IsNil)
+ prev, ok, err := p.Set(test.key, test.value)
+ if test.err != "" {
+ c.Assert(err, ErrorMatches, test.err)
+ continue
+ }
+
+ c.Assert(err, IsNil)
+ c.Assert(ok, Equals, test.ok)
+ if ok {
+ c.Assert(prev, Equals, test.prev)
+ }
+ c.Assert(p.Keys(), DeepEquals, test.keys)
+ }
+}
+
+func (s *TestSuite) TestMustSet(c *C) {
+ input := "key=${key}"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(func() { p.MustSet("key", "${key}") }, PanicMatches, "circular reference .*")
+}
+
+func (s *TestSuite) TestWrite(c *C) {
+ for _, test := range writeTests {
+ p, err := parse(test.input)
+
+ buf := new(bytes.Buffer)
+ var n int
+ switch test.encoding {
+ case "UTF-8":
+ n, err = p.Write(buf, UTF8)
+ case "ISO-8859-1":
+ n, err = p.Write(buf, ISO_8859_1)
+ }
+ c.Assert(err, IsNil)
+ s := string(buf.Bytes())
+ c.Assert(n, Equals, len(test.output), Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
+ c.Assert(s, Equals, test.output, Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
+ }
+}
+
+func (s *TestSuite) TestWriteComment(c *C) {
+ for _, test := range writeCommentTests {
+ p, err := parse(test.input)
+
+ buf := new(bytes.Buffer)
+ var n int
+ switch test.encoding {
+ case "UTF-8":
+ n, err = p.WriteComment(buf, "# ", UTF8)
+ case "ISO-8859-1":
+ n, err = p.WriteComment(buf, "# ", ISO_8859_1)
+ }
+ c.Assert(err, IsNil)
+ s := string(buf.Bytes())
+ c.Assert(n, Equals, len(test.output), Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
+ c.Assert(s, Equals, test.output, Commentf("input=%q expected=%q obtained=%q", test.input, test.output, s))
+ }
+}
+
+func (s *TestSuite) TestCustomExpansionExpression(c *C) {
+ testKeyValuePrePostfix(c, "*[", "]*", "key=value\nkey2=*[key]*", "key", "value", "key2", "value")
+}
+
+func (s *TestSuite) TestPanicOn32BitIntOverflow(c *C) {
+ is32Bit = true
+ var min, max int64 = math.MinInt32 - 1, math.MaxInt32 + 1
+ input := fmt.Sprintf("min=%d\nmax=%d", min, max)
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetInt64("min"), Equals, min)
+ c.Assert(p.MustGetInt64("max"), Equals, max)
+ c.Assert(func() { p.MustGetInt("min") }, PanicMatches, ".* out of range")
+ c.Assert(func() { p.MustGetInt("max") }, PanicMatches, ".* out of range")
+}
+
+func (s *TestSuite) TestPanicOn32BitUintOverflow(c *C) {
+ is32Bit = true
+ var max uint64 = math.MaxUint32 + 1
+ input := fmt.Sprintf("max=%d", max)
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Assert(p.MustGetUint64("max"), Equals, max)
+ c.Assert(func() { p.MustGetUint("max") }, PanicMatches, ".* out of range")
+}
+
+func (s *TestSuite) TestDeleteKey(c *C) {
+ input := "#comments should also be gone\nkey=to-be-deleted\nsecond=key"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Check(len(p.m), Equals, 2)
+ c.Check(len(p.c), Equals, 1)
+ c.Check(len(p.k), Equals, 2)
+ p.Delete("key")
+ c.Check(len(p.m), Equals, 1)
+ c.Check(len(p.c), Equals, 0)
+ c.Check(len(p.k), Equals, 1)
+}
+
+func (s *TestSuite) TestDeleteUnknownKey(c *C) {
+ input := "#comments should also be gone\nkey=to-be-deleted"
+ p, err := parse(input)
+ c.Assert(err, IsNil)
+ c.Check(len(p.m), Equals, 1)
+ c.Check(len(p.c), Equals, 1)
+ c.Check(len(p.k), Equals, 1)
+ p.Delete("wrong-key")
+ c.Check(len(p.m), Equals, 1)
+ c.Check(len(p.c), Equals, 1)
+ c.Check(len(p.k), Equals, 1)
+}
+
+// ----------------------------------------------------------------------------
+
+// tests all combinations of delimiters, leading and/or trailing whitespace and newlines.
+func testWhitespaceAndDelimiterCombinations(c *C, key, value string) {
+ whitespace := []string{"", " ", "\f", "\t"}
+ delimiters := []string{"", " ", "=", ":"}
+ newlines := []string{"", "\r", "\n", "\r\n"}
+ for _, dl := range delimiters {
+ for _, ws1 := range whitespace {
+ for _, ws2 := range whitespace {
+ for _, nl := range newlines {
+ // skip the one case where there is nothing between a key and a value
+ if ws1 == "" && dl == "" && ws2 == "" && value != "" {
+ continue
+ }
+
+ input := fmt.Sprintf("%s%s%s%s%s%s", key, ws1, dl, ws2, value, nl)
+ testKeyValue(c, input, key, value)
+ }
+ }
+ }
+ }
+}
+
+// tests whether key/value pairs exist for a given input.
+// keyvalues is expected to be an even number of strings of "key", "value", ...
+func testKeyValue(c *C, input string, keyvalues ...string) {
+ testKeyValuePrePostfix(c, "${", "}", input, keyvalues...)
+}
+
+// tests whether key/value pairs exist for a given input.
+// keyvalues is expected to be an even number of strings of "key", "value", ...
+func testKeyValuePrePostfix(c *C, prefix, postfix, input string, keyvalues ...string) {
+ printf("%q\n", input)
+
+ p, err := Load([]byte(input), ISO_8859_1)
+ c.Assert(err, IsNil)
+ p.Prefix = prefix
+ p.Postfix = postfix
+ assertKeyValues(c, input, p, keyvalues...)
+}
+
+// tests whether key/value pairs exist for a given input.
+// keyvalues is expected to be an even number of strings of "key", "value", ...
+func assertKeyValues(c *C, input string, p *Properties, keyvalues ...string) {
+ c.Assert(p, NotNil)
+ c.Assert(2*p.Len(), Equals, len(keyvalues), Commentf("Odd number of key/value pairs."))
+
+ for i := 0; i < len(keyvalues); i += 2 {
+ key, value := keyvalues[i], keyvalues[i+1]
+ v, ok := p.Get(key)
+ c.Assert(ok, Equals, true, Commentf("No key %q found (input=%q)", key, input))
+ c.Assert(v, Equals, value, Commentf("Value %q does not match %q (input=%q)", v, value, input))
+ }
+}
+
+// prints to stderr if the -verbose flag was given.
+func printf(format string, args ...interface{}) {
+ if *verbose {
+ fmt.Fprintf(os.Stderr, format, args...)
+ }
+}
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
new file mode 100644
index 0000000000..d9ce2806bb
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -0,0 +1,31 @@
+// Copyright 2016 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+ "fmt"
+ "math"
+)
+
+// make this a var to overwrite it in a test
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+ if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+ if is32Bit && v > math.MaxUint32 {
+ panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+ }
+ return uint(v)
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
new file mode 100644
index 0000000000..7f3fe9a969
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+
+go:
+ - 1.4
+
+script:
+ - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 0000000000..f9c841a51e
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 0000000000..659d6885fc
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+ "type": "person",
+ "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 0000000000..aa91f76ce4
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,151 @@
+package mapstructure
+
+import (
+ "errors"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+ // Create variables here so we can reference them with the reflect pkg
+ var f1 DecodeHookFuncType
+ var f2 DecodeHookFuncKind
+
+ // Fill in the variables into this interface and the rest is done
+ // automatically using the reflect package.
+ potential := []interface{}{f1, f2}
+
+ v := reflect.ValueOf(h)
+ vt := v.Type()
+ for _, raw := range potential {
+ pt := reflect.ValueOf(raw).Type()
+ if vt.ConvertibleTo(pt) {
+ return v.Convert(pt).Interface()
+ }
+ }
+
+ return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+ raw DecodeHookFunc,
+ from reflect.Type, to reflect.Type,
+ data interface{}) (interface{}, error) {
+ // Build our arguments that reflect expects
+ argVals := make([]reflect.Value, 3)
+ argVals[0] = reflect.ValueOf(from)
+ argVals[1] = reflect.ValueOf(to)
+ argVals[2] = reflect.ValueOf(data)
+
+ switch f := typedDecodeHook(raw).(type) {
+ case DecodeHookFuncType:
+ return f(from, to, data)
+ case DecodeHookFuncKind:
+ return f(from.Kind(), to.Kind(), data)
+ default:
+ return nil, errors.New("invalid decode hook signature")
+ }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ var err error
+ for _, f1 := range fs {
+ data, err = DecodeHookExec(f1, f, t, data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Modify the from kind to be correct with the new data
+ f = reflect.ValueOf(data).Type()
+ }
+
+ return data, nil
+ }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+ return func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ if f != reflect.String || t != reflect.Slice {
+ return data, nil
+ }
+
+ raw := data.(string)
+ if raw == "" {
+ return []string{}, nil
+ }
+
+ return strings.Split(raw, sep), nil
+ }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+ return func(
+ f reflect.Type,
+ t reflect.Type,
+ data interface{}) (interface{}, error) {
+ if f.Kind() != reflect.String {
+ return data, nil
+ }
+ if t != reflect.TypeOf(time.Duration(5)) {
+ return data, nil
+ }
+
+ // Convert it by parsing
+ return time.ParseDuration(data.(string))
+ }
+}
+
+func WeaklyTypedHook(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ dataVal := reflect.ValueOf(data)
+ switch t {
+ case reflect.String:
+ switch f {
+ case reflect.Bool:
+ if dataVal.Bool() {
+ return "1", nil
+ } else {
+ return "0", nil
+ }
+ case reflect.Float32:
+ return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+ case reflect.Int:
+ return strconv.FormatInt(dataVal.Int(), 10), nil
+ case reflect.Slice:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ if elemKind == reflect.Uint8 {
+ return string(dataVal.Interface().([]uint8)), nil
+ }
+ case reflect.Uint:
+ return strconv.FormatUint(dataVal.Uint(), 10), nil
+ }
+ }
+
+ return data, nil
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go
new file mode 100644
index 0000000000..53289afcfb
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go
@@ -0,0 +1,229 @@
+package mapstructure
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestComposeDecodeHookFunc(t *testing.T) {
+ f1 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return data.(string) + "foo", nil
+ }
+
+ f2 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return data.(string) + "bar", nil
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ result, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "")
+ if err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+ if result.(string) != "foobar" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestComposeDecodeHookFunc_err(t *testing.T) {
+ f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
+ return nil, errors.New("foo")
+ }
+
+ f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) {
+ panic("NOPE")
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ _, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42)
+ if err.Error() != "foo" {
+ t.Fatalf("bad: %s", err)
+ }
+}
+
+func TestComposeDecodeHookFunc_kinds(t *testing.T) {
+ var f2From reflect.Kind
+
+ f1 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ return int(42), nil
+ }
+
+ f2 := func(
+ f reflect.Kind,
+ t reflect.Kind,
+ data interface{}) (interface{}, error) {
+ f2From = f
+ return data, nil
+ }
+
+ f := ComposeDecodeHookFunc(f1, f2)
+
+ _, err := DecodeHookExec(
+ f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "")
+ if err != nil {
+ t.Fatalf("bad: %s", err)
+ }
+ if f2From != reflect.Int {
+ t.Fatalf("bad: %#v", f2From)
+ }
+}
+
+func TestStringToSliceHookFunc(t *testing.T) {
+ f := StringToSliceHookFunc(",")
+
+ strType := reflect.TypeOf("")
+ sliceType := reflect.TypeOf([]byte(""))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ {sliceType, sliceType, 42, 42, false},
+ {strType, strType, 42, 42, false},
+ {
+ strType,
+ sliceType,
+ "foo,bar,baz",
+ []string{"foo", "bar", "baz"},
+ false,
+ },
+ {
+ strType,
+ sliceType,
+ "",
+ []string{},
+ false,
+ },
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
+
+func TestStringToTimeDurationHookFunc(t *testing.T) {
+ f := StringToTimeDurationHookFunc()
+
+ strType := reflect.TypeOf("")
+ timeType := reflect.TypeOf(time.Duration(5))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ {strType, timeType, "5s", 5 * time.Second, false},
+ {strType, timeType, "5", time.Duration(0), true},
+ {strType, strType, "5", "5", false},
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
+
+func TestWeaklyTypedHook(t *testing.T) {
+ var f DecodeHookFunc = WeaklyTypedHook
+
+ boolType := reflect.TypeOf(true)
+ strType := reflect.TypeOf("")
+ sliceType := reflect.TypeOf([]byte(""))
+ cases := []struct {
+ f, t reflect.Type
+ data interface{}
+ result interface{}
+ err bool
+ }{
+ // TO STRING
+ {
+ boolType,
+ strType,
+ false,
+ "0",
+ false,
+ },
+
+ {
+ boolType,
+ strType,
+ true,
+ "1",
+ false,
+ },
+
+ {
+ reflect.TypeOf(float32(1)),
+ strType,
+ float32(7),
+ "7",
+ false,
+ },
+
+ {
+ reflect.TypeOf(int(1)),
+ strType,
+ int(7),
+ "7",
+ false,
+ },
+
+ {
+ sliceType,
+ strType,
+ []uint8("foo"),
+ "foo",
+ false,
+ },
+
+ {
+ reflect.TypeOf(uint(1)),
+ strType,
+ uint(7),
+ "7",
+ false,
+ },
+ }
+
+ for i, tc := range cases {
+ actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data)
+ if tc.err != (err != nil) {
+ t.Fatalf("case %d: expected err %#v", i, tc.err)
+ }
+ if !reflect.DeepEqual(actual, tc.result) {
+ t.Fatalf(
+ "case %d: expected %#v, got %#v",
+ i, tc.result, actual)
+ }
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 0000000000..47a99e5af3
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+ Errors []string
+}
+
+func (e *Error) Error() string {
+ points := make([]string, len(e.Errors))
+ for i, err := range e.Errors {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ sort.Strings(points)
+ return fmt.Sprintf(
+ "%d error(s) decoding:\n\n%s",
+ len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+ if e == nil {
+ return nil
+ }
+
+ result := make([]error, len(e.Errors))
+ for i, e := range e.Errors {
+ result[i] = errors.New(e)
+ }
+
+ return result
+}
+
+func appendErrors(errors []string, err error) []string {
+ switch e := err.(type) {
+ case *Error:
+ return append(errors, e.Errors...)
+ default:
+ return append(errors, e.Error())
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 0000000000..a367a95b68
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,767 @@
+// Package mapstructure exposes functionality to convert an
+// arbitrary map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+ // DecodeHook, if set, will be called before any decoding and any
+ // type conversion (if WeaklyTypedInput is on). This lets you modify
+ // the values before they're set down onto the resulting struct.
+ //
+ // If an error is returned, the entire decode will fail with that
+ // error.
+ DecodeHook DecodeHookFunc
+
+ // If ErrorUnused is true, then it is an error for there to exist
+ // keys in the original map that were unused in the decoding process
+ // (extra keys).
+ ErrorUnused bool
+
+ // ZeroFields, if set to true, will zero fields before writing them.
+ // For example, a map will be emptied before decoded values are put in
+ // it. If this is false, a map will be merged.
+ ZeroFields bool
+
+ // If WeaklyTypedInput is true, the decoder will make the following
+ // "weak" conversions:
+ //
+ // - bools to string (true = "1", false = "0")
+ // - numbers to string (base 10)
+ // - bools to int/uint (true = 1, false = 0)
+ // - strings to int/uint (base implied by prefix)
+ // - int to bool (true if value != 0)
+ // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+ // FALSE, false, False. Anything else is an error)
+ // - empty array = empty map and vice versa
+ // - negative numbers to overflowed uint values (base 10)
+ // - slice of maps to a merged map
+ //
+ WeaklyTypedInput bool
+
+ // Metadata is the struct that will contain extra metadata about
+ // the decoding. If this is nil, then no metadata will be tracked.
+ Metadata *Metadata
+
+ // Result is a pointer to the struct that will contain the decoded
+ // value.
+ Result interface{}
+
+ // The tag name that mapstructure reads for field names. This
+ // defaults to "mapstructure"
+ TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+ config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+ // Keys are the keys of the structure which were successfully decoded
+ Keys []string
+
+ // Unused is a slice of keys that were found in the raw value but
+ // weren't decoded since there was no matching field in the result interface
+ Unused []string
+}
+
+// Decode takes a map and uses reflection to convert it into the
+// given Go native structure. val must be a pointer to a struct.
+func Decode(m interface{}, rawVal interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: rawVal,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(m)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+ config := &DecoderConfig{
+ Metadata: nil,
+ Result: output,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ return err
+ }
+
+ return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+ val := reflect.ValueOf(config.Result)
+ if val.Kind() != reflect.Ptr {
+ return nil, errors.New("result must be a pointer")
+ }
+
+ val = val.Elem()
+ if !val.CanAddr() {
+ return nil, errors.New("result must be addressable (a pointer)")
+ }
+
+ if config.Metadata != nil {
+ if config.Metadata.Keys == nil {
+ config.Metadata.Keys = make([]string, 0)
+ }
+
+ if config.Metadata.Unused == nil {
+ config.Metadata.Unused = make([]string, 0)
+ }
+ }
+
+ if config.TagName == "" {
+ config.TagName = "mapstructure"
+ }
+
+ result := &Decoder{
+ config: config,
+ }
+
+ return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(raw interface{}) error {
+ return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
+ if data == nil {
+ // If the data is nil, then we don't set anything.
+ return nil
+ }
+
+ dataVal := reflect.ValueOf(data)
+ if !dataVal.IsValid() {
+ // If the data value is invalid, then we just set the value
+ // to be the zero value.
+ val.Set(reflect.Zero(val.Type()))
+ return nil
+ }
+
+ if d.config.DecodeHook != nil {
+ // We have a DecodeHook, so let's pre-process the data.
+ var err error
+ data, err = DecodeHookExec(
+ d.config.DecodeHook,
+ dataVal.Type(), val.Type(), data)
+ if err != nil {
+ return err
+ }
+ }
+
+ var err error
+ dataKind := getKind(val)
+ switch dataKind {
+ case reflect.Bool:
+ err = d.decodeBool(name, data, val)
+ case reflect.Interface:
+ err = d.decodeBasic(name, data, val)
+ case reflect.String:
+ err = d.decodeString(name, data, val)
+ case reflect.Int:
+ err = d.decodeInt(name, data, val)
+ case reflect.Uint:
+ err = d.decodeUint(name, data, val)
+ case reflect.Float32:
+ err = d.decodeFloat(name, data, val)
+ case reflect.Struct:
+ err = d.decodeStruct(name, data, val)
+ case reflect.Map:
+ err = d.decodeMap(name, data, val)
+ case reflect.Ptr:
+ err = d.decodePtr(name, data, val)
+ case reflect.Slice:
+ err = d.decodeSlice(name, data, val)
+ default:
+ // If we reached this point then we weren't able to decode it
+ return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
+ }
+
+ // If we reached here, then we successfully decoded SOMETHING, so
+ // mark the key as used if we're tracking metadata.
+ if d.config.Metadata != nil && name != "" {
+ d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+ }
+
+ return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataValType := dataVal.Type()
+ if !dataValType.AssignableTo(val.Type()) {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got '%s'",
+ name, val.Type(), dataValType)
+ }
+
+ val.Set(dataVal)
+ return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ converted := true
+ switch {
+ case dataKind == reflect.String:
+ val.SetString(dataVal.String())
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetString("1")
+ } else {
+ val.SetString("0")
+ }
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+ case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+ dataType := dataVal.Type()
+ elemKind := dataType.Elem().Kind()
+ switch {
+ case elemKind == reflect.Uint8:
+ val.SetString(string(dataVal.Interface().([]uint8)))
+ default:
+ converted = false
+ }
+ default:
+ converted = false
+ }
+
+ if !converted {
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetInt(dataVal.Int())
+ case dataKind == reflect.Uint:
+ val.SetInt(int64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetInt(int64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetInt(1)
+ } else {
+ val.SetInt(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+ if err == nil {
+ val.SetInt(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Int:
+ i := dataVal.Int()
+ if i < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %d overflows uint",
+ name, i)
+ }
+ val.SetUint(uint64(i))
+ case dataKind == reflect.Uint:
+ val.SetUint(dataVal.Uint())
+ case dataKind == reflect.Float32:
+ f := dataVal.Float()
+ if f < 0 && !d.config.WeaklyTypedInput {
+ return fmt.Errorf("cannot parse '%s', %f overflows uint",
+ name, f)
+ }
+ val.SetUint(uint64(f))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetUint(1)
+ } else {
+ val.SetUint(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+ if err == nil {
+ val.SetUint(i)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Bool:
+ val.SetBool(dataVal.Bool())
+ case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Int() != 0)
+ case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Uint() != 0)
+ case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+ val.SetBool(dataVal.Float() != 0)
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ b, err := strconv.ParseBool(dataVal.String())
+ if err == nil {
+ val.SetBool(b)
+ } else if dataVal.String() == "" {
+ val.SetBool(false)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.ValueOf(data)
+ dataKind := getKind(dataVal)
+
+ switch {
+ case dataKind == reflect.Int:
+ val.SetFloat(float64(dataVal.Int()))
+ case dataKind == reflect.Uint:
+ val.SetFloat(float64(dataVal.Uint()))
+ case dataKind == reflect.Float32:
+ val.SetFloat(float64(dataVal.Float()))
+ case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+ if dataVal.Bool() {
+ val.SetFloat(1)
+ } else {
+ val.SetFloat(0)
+ }
+ case dataKind == reflect.String && d.config.WeaklyTypedInput:
+ f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+ if err == nil {
+ val.SetFloat(f)
+ } else {
+ return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+ }
+ default:
+ return fmt.Errorf(
+ "'%s' expected type '%s', got unconvertible type '%s'",
+ name, val.Type(), dataVal.Type())
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+ valType := val.Type()
+ valKeyType := valType.Key()
+ valElemType := valType.Elem()
+
+ // By default we overwrite keys in the current map
+ valMap := val
+
+ // If the map is nil or we're purposely zeroing fields, make a new map
+ if valMap.IsNil() || d.config.ZeroFields {
+ // Make a new map to hold our result
+ mapType := reflect.MapOf(valKeyType, valElemType)
+ valMap = reflect.MakeMap(mapType)
+ }
+
+ // Check input type
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ if dataVal.Kind() != reflect.Map {
+ // In weak mode, we accept a slice of maps as an input...
+ if d.config.WeaklyTypedInput {
+ switch dataVal.Kind() {
+ case reflect.Array, reflect.Slice:
+ // Special case for BC reasons (covered by tests)
+ if dataVal.Len() == 0 {
+ val.Set(valMap)
+ return nil
+ }
+
+ for i := 0; i < dataVal.Len(); i++ {
+ err := d.decode(
+ fmt.Sprintf("%s[%d]", name, i),
+ dataVal.Index(i).Interface(), val)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+ }
+ }
+
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+ }
+
+ // Accumulate errors
+ errors := make([]string, 0)
+
+ for _, k := range dataVal.MapKeys() {
+ fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+ // First decode the key into the proper type
+ currentKey := reflect.Indirect(reflect.New(valKeyType))
+ if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ // Next decode the data into the proper type
+ v := dataVal.MapIndex(k).Interface()
+ currentVal := reflect.Indirect(reflect.New(valElemType))
+ if err := d.decode(fieldName, v, currentVal); err != nil {
+ errors = appendErrors(errors, err)
+ continue
+ }
+
+ valMap.SetMapIndex(currentKey, currentVal)
+ }
+
+ // Set the built up map to the value
+ val.Set(valMap)
+
+ // If we had errors, return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ valType := val.Type()
+ valElemType := valType.Elem()
+ realVal := reflect.New(valElemType)
+ if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+ return err
+ }
+
+ val.Set(realVal)
+ return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+ dataValKind := dataVal.Kind()
+ valType := val.Type()
+ valElemType := valType.Elem()
+ sliceType := reflect.SliceOf(valElemType)
+
+ // Check input type
+ if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+ // Accept empty map instead of array/slice in weakly typed mode
+ if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ } else {
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ }
+ }
+
+ // Make a new slice to hold our result, same size as the original data.
+ valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+
+ // Accumulate any errors
+ errors := make([]string, 0)
+
+ for i := 0; i < dataVal.Len(); i++ {
+ currentData := dataVal.Index(i).Interface()
+ currentField := valSlice.Index(i)
+
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+ if err := d.decode(fieldName, currentData, currentField); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ // Finally, set the value to the slice we built up
+ val.Set(valSlice)
+
+ // If there were errors, we return those
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+ dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+ // If the type of the value to write to and the data match directly,
+ // then we just set it directly instead of recursing into the structure.
+ if dataVal.Type() == val.Type() {
+ val.Set(dataVal)
+ return nil
+ }
+
+ dataValKind := dataVal.Kind()
+ if dataValKind != reflect.Map {
+ return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+ }
+
+ dataValType := dataVal.Type()
+ if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+ return fmt.Errorf(
+ "'%s' needs a map with string keys, has '%s' keys",
+ name, dataValType.Key().Kind())
+ }
+
+ dataValKeys := make(map[reflect.Value]struct{})
+ dataValKeysUnused := make(map[interface{}]struct{})
+ for _, dataValKey := range dataVal.MapKeys() {
+ dataValKeys[dataValKey] = struct{}{}
+ dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+ }
+
+ errors := make([]string, 0)
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = val
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ fields := make(map[*reflect.StructField]reflect.Value)
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ fieldKind := fieldType.Type.Kind()
+
+ if fieldType.Anonymous {
+ if fieldKind != reflect.Struct {
+ errors = appendErrors(errors,
+ fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
+ continue
+ }
+ }
+
+ // If "squash" is specified in the tag, we squash the field down.
+ squash := false
+ tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ if fieldKind != reflect.Struct {
+ errors = appendErrors(errors,
+ fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+ } else {
+ structs = append(structs, val.FieldByName(fieldType.Name))
+ }
+ continue
+ }
+
+ // Normal struct field, store it away
+ fields[&fieldType] = structVal.Field(i)
+ }
+ }
+
+ for fieldType, field := range fields {
+ fieldName := fieldType.Name
+
+ tagValue := fieldType.Tag.Get(d.config.TagName)
+ tagValue = strings.SplitN(tagValue, ",", 2)[0]
+ if tagValue != "" {
+ fieldName = tagValue
+ }
+
+ rawMapKey := reflect.ValueOf(fieldName)
+ rawMapVal := dataVal.MapIndex(rawMapKey)
+ if !rawMapVal.IsValid() {
+ // Do a slower search by iterating over each key and
+ // doing case-insensitive search.
+ for dataValKey, _ := range dataValKeys {
+ mK, ok := dataValKey.Interface().(string)
+ if !ok {
+ // Not a string key
+ continue
+ }
+
+ if strings.EqualFold(mK, fieldName) {
+ rawMapKey = dataValKey
+ rawMapVal = dataVal.MapIndex(dataValKey)
+ break
+ }
+ }
+
+ if !rawMapVal.IsValid() {
+ // There was no matching key in the map for the value in
+ // the struct. Just ignore.
+ continue
+ }
+ }
+
+ // Delete the key we're using from the unused map so we stop tracking
+ delete(dataValKeysUnused, rawMapKey.Interface())
+
+ if !field.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !field.CanSet() {
+ continue
+ }
+
+ // If the name is empty string, then we're at the root, and we
+ // don't dot-join the fields.
+ if name != "" {
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ }
+
+ if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+ errors = appendErrors(errors, err)
+ }
+ }
+
+ if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+ keys := make([]string, 0, len(dataValKeysUnused))
+ for rawKey, _ := range dataValKeysUnused {
+ keys = append(keys, rawKey.(string))
+ }
+ sort.Strings(keys)
+
+ err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+ errors = appendErrors(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return &Error{errors}
+ }
+
+ // Add the unused keys to the list of unused keys if we're tracking metadata
+ if d.config.Metadata != nil {
+ for rawKey, _ := range dataValKeysUnused {
+ key := rawKey.(string)
+ if name != "" {
+ key = fmt.Sprintf("%s.%s", name, key)
+ }
+
+ d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+ }
+ }
+
+ return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+ kind := val.Kind()
+
+ switch {
+ case kind >= reflect.Int && kind <= reflect.Int64:
+ return reflect.Int
+ case kind >= reflect.Uint && kind <= reflect.Uint64:
+ return reflect.Uint
+ case kind >= reflect.Float32 && kind <= reflect.Float64:
+ return reflect.Float32
+ default:
+ return kind
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
new file mode 100644
index 0000000000..41d2a41f75
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go
@@ -0,0 +1,279 @@
+package mapstructure
+
+import (
+ "encoding/json"
+ "testing"
+)
+
+func Benchmark_Decode(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the
+// given Go native structure pointed to by v. v must be a pointer to a struct.
+func decodeViaJSON(data interface{}, v interface{}) error {
+ // Perform the task by simply marshalling the input into JSON,
+ // then unmarshalling it into target native Go struct.
+ b, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(b, v)
+}
+
+func Benchmark_DecodeViaJSON(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ for i := 0; i < b.N; i++ {
+ decodeViaJSON(input, &result)
+ }
+}
+
+func Benchmark_DecodeBasic(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "Vuint": 42,
+ "vbool": true,
+ "Vfloat": 42.42,
+ "vsilent": true,
+ "vdata": 42,
+ }
+
+ var result Basic
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeEmbedded(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result Embedded
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeTypeConversion(b *testing.B) {
+ input := map[string]interface{}{
+ "IntToFloat": 42,
+ "IntToUint": 42,
+ "IntToBool": 1,
+ "IntToString": 42,
+ "UintToInt": 42,
+ "UintToFloat": 42,
+ "UintToBool": 42,
+ "UintToString": 42,
+ "BoolToInt": true,
+ "BoolToUint": true,
+ "BoolToFloat": true,
+ "BoolToString": true,
+ "FloatToInt": 42.42,
+ "FloatToUint": 42.42,
+ "FloatToBool": 42.42,
+ "FloatToString": 42.42,
+ "StringToInt": "42",
+ "StringToUint": "42",
+ "StringToBool": "1",
+ "StringToFloat": "42.42",
+ "SliceToMap": []interface{}{},
+ "MapToSlice": map[string]interface{}{},
+ }
+
+ var resultStrict TypeConversionResult
+ for i := 0; i < b.N; i++ {
+ Decode(input, &resultStrict)
+ }
+}
+
+func Benchmark_DecodeMap(b *testing.B) {
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeMapOfStruct(b *testing.B) {
+ input := map[string]interface{}{
+ "value": map[string]interface{}{
+ "foo": map[string]string{"vstring": "one"},
+ "bar": map[string]string{"vstring": "two"},
+ },
+ }
+
+ var result MapOfStruct
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeSlice(b *testing.B) {
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": []string{"foo", "bar", "baz"},
+ }
+
+ var result Slice
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeSliceOfStruct(b *testing.B) {
+ input := map[string]interface{}{
+ "value": []map[string]interface{}{
+ {"vstring": "one"},
+ {"vstring": "two"},
+ },
+ }
+
+ var result SliceOfStruct
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
+
+func Benchmark_DecodeWeaklyTypedInput(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON, generated by a weakly typed language
+ // such as PHP.
+ input := map[string]interface{}{
+ "name": 123, // number => string
+ "age": "42", // string => number
+ "emails": map[string]interface{}{}, // empty map => empty array
+ }
+
+ var result Person
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeMetadata(b *testing.B) {
+ type Person struct {
+ Name string
+ Age int
+ }
+
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "email": "foo@bar.com",
+ }
+
+ var md Metadata
+ var result Person
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeMetadataEmbedded(b *testing.B) {
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var md Metadata
+ var result EmbeddedSquash
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ b.Fatalf("err: %s", err)
+ }
+
+ for i := 0; i < b.N; i++ {
+ decoder.Decode(input)
+ }
+}
+
+func Benchmark_DecodeTagged(b *testing.B) {
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "value",
+ }
+
+ var result Tagged
+ for i := 0; i < b.N; i++ {
+ Decode(input, &result)
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
new file mode 100644
index 0000000000..7054f1ac9a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go
@@ -0,0 +1,47 @@
+package mapstructure
+
+import "testing"
+
+// GH-1
+func TestDecode_NilValue(t *testing.T) {
+ input := map[string]interface{}{
+ "vfoo": nil,
+ "vother": nil,
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("should not error: %s", err)
+ }
+
+ if result.Vfoo != "" {
+ t.Fatalf("value should be default: %s", result.Vfoo)
+ }
+
+ if result.Vother != nil {
+ t.Fatalf("Vother should be nil: %s", result.Vother)
+ }
+}
+
+// GH-10
+func TestDecode_mapInterfaceInterface(t *testing.T) {
+ input := map[interface{}]interface{}{
+ "vfoo": nil,
+ "vother": nil,
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("should not error: %s", err)
+ }
+
+ if result.Vfoo != "" {
+ t.Fatalf("value should be default: %s", result.Vfoo)
+ }
+
+ if result.Vother != nil {
+ t.Fatalf("Vother should be nil: %s", result.Vother)
+ }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
new file mode 100644
index 0000000000..f17c214a8a
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go
@@ -0,0 +1,203 @@
+package mapstructure
+
+import (
+ "fmt"
+)
+
+func ExampleDecode() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "emails": []string{"one", "two", "three"},
+ "extra": map[string]string{
+ "twitter": "mitchellh",
+ },
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output:
+ // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}}
+}
+
+func ExampleDecode_errors() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ Extra map[string]string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": 123,
+ "age": "bad value",
+ "emails": []int{1, 2, 3},
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err == nil {
+ panic("should have an error")
+ }
+
+ fmt.Println(err.Error())
+ // Output:
+ // 5 error(s) decoding:
+ //
+ // * 'Age' expected type 'int', got unconvertible type 'string'
+ // * 'Emails[0]' expected type 'string', got unconvertible type 'int'
+ // * 'Emails[1]' expected type 'string', got unconvertible type 'int'
+ // * 'Emails[2]' expected type 'string', got unconvertible type 'int'
+ // * 'Name' expected type 'string', got unconvertible type 'int'
+}
+
+func ExampleDecode_metadata() {
+ type Person struct {
+ Name string
+ Age int
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON where we're not quite sure of the
+ // struct initially.
+ input := map[string]interface{}{
+ "name": "Mitchell",
+ "age": 91,
+ "email": "foo@bar.com",
+ }
+
+ // For metadata, we make a more advanced DecoderConfig so we can
+ // more finely configure the decoder that is used. In this case, we
+ // just tell the decoder we want to track metadata.
+ var md Metadata
+ var result Person
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ if err := decoder.Decode(input); err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Unused keys: %#v", md.Unused)
+ // Output:
+ // Unused keys: []string{"email"}
+}
+
+func ExampleDecode_weaklyTypedInput() {
+ type Person struct {
+ Name string
+ Age int
+ Emails []string
+ }
+
+ // This input can come from anywhere, but typically comes from
+ // something like decoding JSON, generated by a weakly typed language
+ // such as PHP.
+ input := map[string]interface{}{
+ "name": 123, // number => string
+ "age": "42", // string => number
+ "emails": map[string]interface{}{}, // empty map => empty array
+ }
+
+ var result Person
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ panic(err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}}
+}
+
+func ExampleDecode_tags() {
+ // Note that the mapstructure tags defined in the struct type
+ // can indicate which fields the values are mapped to.
+ type Person struct {
+ Name string `mapstructure:"person_name"`
+ Age int `mapstructure:"person_age"`
+ }
+
+ input := map[string]interface{}{
+ "person_name": "Mitchell",
+ "person_age": 91,
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%#v", result)
+ // Output:
+ // mapstructure.Person{Name:"Mitchell", Age:91}
+}
+
+func ExampleDecode_embeddedStruct() {
+ // Squashing multiple embedded structs is allowed using the squash tag.
+ // This is demonstrated by creating a composite struct of multiple types
+ // and decoding into it. In this case, a person can carry with it both
+ // a Family and a Location, as well as their own FirstName.
+ type Family struct {
+ LastName string
+ }
+ type Location struct {
+ City string
+ }
+ type Person struct {
+ Family `mapstructure:",squash"`
+ Location `mapstructure:",squash"`
+ FirstName string
+ }
+
+ input := map[string]interface{}{
+ "FirstName": "Mitchell",
+ "LastName": "Hashimoto",
+ "City": "San Francisco",
+ }
+
+ var result Person
+ err := Decode(input, &result)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City)
+ // Output:
+ // Mitchell Hashimoto, San Francisco
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
new file mode 100644
index 0000000000..45e72849f5
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go
@@ -0,0 +1,1047 @@
+package mapstructure
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+)
+
+type Basic struct {
+ Vstring string
+ Vint int
+ Vuint uint
+ Vbool bool
+ Vfloat float64
+ Vextra string
+ vsilent bool
+ Vdata interface{}
+}
+
+type BasicSquash struct {
+ Test Basic `mapstructure:",squash"`
+}
+
+type Embedded struct {
+ Basic
+ Vunique string
+}
+
+type EmbeddedPointer struct {
+ *Basic
+ Vunique string
+}
+
+type EmbeddedSquash struct {
+ Basic `mapstructure:",squash"`
+ Vunique string
+}
+
+type SquashOnNonStructType struct {
+ InvalidSquashType int `mapstructure:",squash"`
+}
+
+type Map struct {
+ Vfoo string
+ Vother map[string]string
+}
+
+type MapOfStruct struct {
+ Value map[string]Basic
+}
+
+type Nested struct {
+ Vfoo string
+ Vbar Basic
+}
+
+type NestedPointer struct {
+ Vfoo string
+ Vbar *Basic
+}
+
+type Slice struct {
+ Vfoo string
+ Vbar []string
+}
+
+type SliceOfStruct struct {
+ Value []Basic
+}
+
+type Tagged struct {
+ Extra string `mapstructure:"bar,what,what"`
+ Value string `mapstructure:"foo"`
+}
+
+type TypeConversionResult struct {
+ IntToFloat float32
+ IntToUint uint
+ IntToBool bool
+ IntToString string
+ UintToInt int
+ UintToFloat float32
+ UintToBool bool
+ UintToString string
+ BoolToInt int
+ BoolToUint uint
+ BoolToFloat float32
+ BoolToString string
+ FloatToInt int
+ FloatToUint uint
+ FloatToBool bool
+ FloatToString string
+ SliceUint8ToString string
+ StringToInt int
+ StringToUint uint
+ StringToBool bool
+ StringToFloat float32
+ SliceToMap map[string]interface{}
+ MapToSlice []interface{}
+}
+
+func TestBasicTypes(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "Vuint": 42,
+ "vbool": true,
+ "Vfloat": 42.42,
+ "vsilent": true,
+ "vdata": 42,
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err != nil {
+ t.Errorf("got an err: %s", err.Error())
+ t.FailNow()
+ }
+
+ if result.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
+ }
+
+ if result.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vint)
+ }
+
+ if result.Vuint != 42 {
+ t.Errorf("vuint value should be 42: %#v", result.Vuint)
+ }
+
+ if result.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbool)
+ }
+
+ if result.Vfloat != 42.42 {
+ t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat)
+ }
+
+ if result.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vextra)
+ }
+
+ if result.vsilent != false {
+ t.Error("vsilent should not be set, it is unexported")
+ }
+
+ if result.Vdata != 42 {
+ t.Error("vdata should be valid")
+ }
+}
+
+func TestBasic_IntWithFloat(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": float64(42),
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+}
+
+func TestBasic_Merge(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": 42,
+ }
+
+ var result Basic
+ result.Vuint = 100
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ expected := Basic{
+ Vint: 42,
+ Vuint: 100,
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func TestDecode_BasicSquash(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ }
+
+ var result BasicSquash
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Test.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring)
+ }
+}
+
+func TestDecode_Embedded(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result Embedded
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vstring != "innerfoo" {
+ t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring)
+ }
+
+ if result.Vunique != "bar" {
+ t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
+ }
+}
+
+func TestDecode_EmbeddedPointer(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "Basic": map[string]interface{}{
+ "vstring": "innerfoo",
+ },
+ "vunique": "bar",
+ }
+
+ var result EmbeddedPointer
+ err := Decode(input, &result)
+ if err == nil {
+ t.Fatal("should get error")
+ }
+}
+
+func TestDecode_EmbeddedSquash(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var result EmbeddedSquash
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vstring)
+ }
+
+ if result.Vunique != "bar" {
+ t.Errorf("vunique value should be 'bar': %#v", result.Vunique)
+ }
+}
+
+func TestDecode_SquashOnNonStructType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "InvalidSquashType": 42,
+ }
+
+ var result SquashOnNonStructType
+ err := Decode(input, &result)
+ if err == nil {
+ t.Fatal("unexpected success decoding invalid squash field type")
+ } else if !strings.Contains(err.Error(), "unsupported type for squash") {
+ t.Fatalf("unexpected error message for invalid squash field type: %s", err)
+ }
+}
+
+func TestDecode_DecodeHook(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": "WHAT",
+ }
+
+ decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) {
+ if from == reflect.String && to != reflect.String {
+ return 5, nil
+ }
+
+ return v, nil
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ DecodeHook: decodeHook,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Vint != 5 {
+ t.Errorf("vint should be 5: %#v", result.Vint)
+ }
+}
+
+func TestDecode_DecodeHookType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vint": "WHAT",
+ }
+
+ decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) {
+ if from.Kind() == reflect.String &&
+ to.Kind() != reflect.String {
+ return 5, nil
+ }
+
+ return v, nil
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ DecodeHook: decodeHook,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Vint != 5 {
+ t.Errorf("vint should be 5: %#v", result.Vint)
+ }
+}
+
+func TestDecode_Nil(t *testing.T) {
+ t.Parallel()
+
+ var input interface{} = nil
+ result := Basic{
+ Vstring: "foo",
+ }
+
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if result.Vstring != "foo" {
+ t.Fatalf("bad: %#v", result.Vstring)
+ }
+}
+
+func TestDecode_NonStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+
+ var result map[string]string
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ if result["foo"] != "bar" {
+ t.Fatal("foo is not bar")
+ }
+}
+
+func TestDecode_StructMatch(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vbar": Basic{
+ Vstring: "foo",
+ },
+ }
+
+ var result Nested
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("bad: %#v", result)
+ }
+}
+
+func TestDecode_TypeConversion(t *testing.T) {
+ input := map[string]interface{}{
+ "IntToFloat": 42,
+ "IntToUint": 42,
+ "IntToBool": 1,
+ "IntToString": 42,
+ "UintToInt": 42,
+ "UintToFloat": 42,
+ "UintToBool": 42,
+ "UintToString": 42,
+ "BoolToInt": true,
+ "BoolToUint": true,
+ "BoolToFloat": true,
+ "BoolToString": true,
+ "FloatToInt": 42.42,
+ "FloatToUint": 42.42,
+ "FloatToBool": 42.42,
+ "FloatToString": 42.42,
+ "SliceUint8ToString": []uint8("foo"),
+ "StringToInt": "42",
+ "StringToUint": "42",
+ "StringToBool": "1",
+ "StringToFloat": "42.42",
+ "SliceToMap": []interface{}{},
+ "MapToSlice": map[string]interface{}{},
+ }
+
+ expectedResultStrict := TypeConversionResult{
+ IntToFloat: 42.0,
+ IntToUint: 42,
+ UintToInt: 42,
+ UintToFloat: 42,
+ BoolToInt: 0,
+ BoolToUint: 0,
+ BoolToFloat: 0,
+ FloatToInt: 42,
+ FloatToUint: 42,
+ }
+
+ expectedResultWeak := TypeConversionResult{
+ IntToFloat: 42.0,
+ IntToUint: 42,
+ IntToBool: true,
+ IntToString: "42",
+ UintToInt: 42,
+ UintToFloat: 42,
+ UintToBool: true,
+ UintToString: "42",
+ BoolToInt: 1,
+ BoolToUint: 1,
+ BoolToFloat: 1,
+ BoolToString: "1",
+ FloatToInt: 42,
+ FloatToUint: 42,
+ FloatToBool: true,
+ FloatToString: "42.42",
+ SliceUint8ToString: "foo",
+ StringToInt: 42,
+ StringToUint: 42,
+ StringToBool: true,
+ StringToFloat: 42.42,
+ SliceToMap: map[string]interface{}{},
+ MapToSlice: []interface{}{},
+ }
+
+ // Test strict type conversion
+ var resultStrict TypeConversionResult
+ err := Decode(input, &resultStrict)
+ if err == nil {
+ t.Errorf("should return an error")
+ }
+ if !reflect.DeepEqual(resultStrict, expectedResultStrict) {
+ t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict)
+ }
+
+ // Test weak type conversion
+ var decoder *Decoder
+ var resultWeak TypeConversionResult
+
+ config := &DecoderConfig{
+ WeaklyTypedInput: true,
+ Result: &resultWeak,
+ }
+
+ decoder, err = NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if !reflect.DeepEqual(resultWeak, expectedResultWeak) {
+ t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak)
+ }
+}
+
+func TestDecoder_ErrorUnused(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "hello",
+ "foo": "bar",
+ }
+
+ var result Basic
+ config := &DecoderConfig{
+ ErrorUnused: true,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+func TestMap(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vother == nil {
+ t.Fatal("vother should not be nil")
+ }
+
+ if len(result.Vother) != 2 {
+ t.Error("vother should have two items")
+ }
+
+ if result.Vother["foo"] != "foo" {
+ t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"])
+ }
+
+ if result.Vother["bar"] != "bar" {
+ t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"])
+ }
+}
+
+func TestMapMerge(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vother": map[interface{}]interface{}{
+ "foo": "foo",
+ "bar": "bar",
+ },
+ }
+
+ var result Map
+ result.Vother = map[string]string{"hello": "world"}
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ expected := map[string]string{
+ "foo": "foo",
+ "bar": "bar",
+ "hello": "world",
+ }
+ if !reflect.DeepEqual(result.Vother, expected) {
+ t.Errorf("bad: %#v", result.Vother)
+ }
+}
+
+func TestMapOfStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "value": map[string]interface{}{
+ "foo": map[string]string{"vstring": "one"},
+ "bar": map[string]string{"vstring": "two"},
+ },
+ }
+
+ var result MapOfStruct
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err)
+ }
+
+ if result.Value == nil {
+ t.Fatal("value should not be nil")
+ }
+
+ if len(result.Value) != 2 {
+ t.Error("value should have two items")
+ }
+
+ if result.Value["foo"].Vstring != "one" {
+ t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring)
+ }
+
+ if result.Value["bar"].Vstring != "two" {
+ t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring)
+ }
+}
+
+func TestNestedType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "vbool": true,
+ },
+ }
+
+ var result Nested
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
+ }
+
+ if result.Vbar.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
+ }
+
+ if result.Vbar.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
+ }
+
+ if result.Vbar.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
+ }
+}
+
+func TestNestedTypePointer(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": &map[string]interface{}{
+ "vstring": "foo",
+ "vint": 42,
+ "vbool": true,
+ },
+ }
+
+ var result NestedPointer
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got an err: %s", err.Error())
+ }
+
+ if result.Vfoo != "foo" {
+ t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo)
+ }
+
+ if result.Vbar.Vstring != "foo" {
+ t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring)
+ }
+
+ if result.Vbar.Vint != 42 {
+ t.Errorf("vint value should be 42: %#v", result.Vbar.Vint)
+ }
+
+ if result.Vbar.Vbool != true {
+ t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool)
+ }
+
+ if result.Vbar.Vextra != "" {
+ t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra)
+ }
+}
+
+func TestSlice(t *testing.T) {
+ t.Parallel()
+
+ inputStringSlice := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": []string{"foo", "bar", "baz"},
+ }
+
+ inputStringSlicePointer := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": &[]string{"foo", "bar", "baz"},
+ }
+
+ outputStringSlice := &Slice{
+ "foo",
+ []string{"foo", "bar", "baz"},
+ }
+
+ testSliceInput(t, inputStringSlice, outputStringSlice)
+ testSliceInput(t, inputStringSlicePointer, outputStringSlice)
+}
+
+func TestInvalidSlice(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": 42,
+ }
+
+ result := Slice{}
+ err := Decode(input, &result)
+ if err == nil {
+ t.Errorf("expected failure")
+ }
+}
+
+func TestSliceOfStruct(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "value": []map[string]interface{}{
+ {"vstring": "one"},
+ {"vstring": "two"},
+ },
+ }
+
+ var result SliceOfStruct
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got unexpected error: %s", err)
+ }
+
+ if len(result.Value) != 2 {
+ t.Fatalf("expected two values, got %d", len(result.Value))
+ }
+
+ if result.Value[0].Vstring != "one" {
+ t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring)
+ }
+
+ if result.Value[1].Vstring != "two" {
+ t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring)
+ }
+}
+
+func TestSliceToMap(t *testing.T) {
+ t.Parallel()
+
+ input := []map[string]interface{}{
+ map[string]interface{}{
+ "foo": "bar",
+ },
+ map[string]interface{}{
+ "bar": "baz",
+ },
+ }
+
+ var result map[string]interface{}
+ err := WeakDecode(input, &result)
+ if err != nil {
+ t.Fatalf("got an error: %s", err)
+ }
+
+ expected := map[string]interface{}{
+ "foo": "bar",
+ "bar": "baz",
+ }
+ if !reflect.DeepEqual(result, expected) {
+ t.Errorf("bad: %#v", result)
+ }
+}
+
+func TestInvalidType(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": 42,
+ }
+
+ var result Basic
+ err := Decode(input, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok := err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+
+ inputNegIntUint := map[string]interface{}{
+ "vuint": -42,
+ }
+
+ err = Decode(inputNegIntUint, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok = err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+
+ inputNegFloatUint := map[string]interface{}{
+ "vuint": -42.0,
+ }
+
+ err = Decode(inputNegFloatUint, &result)
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ derr, ok = err.(*Error)
+ if !ok {
+ t.Fatalf("error should be kind of Error, instead: %#v", err)
+ }
+
+ if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+}
+
+func TestMetadata(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vfoo": "foo",
+ "vbar": map[string]interface{}{
+ "vstring": "foo",
+ "Vuint": 42,
+ "foo": "bar",
+ },
+ "bar": "nil",
+ }
+
+ var md Metadata
+ var result Nested
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("err: %s", err.Error())
+ }
+
+ expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"}
+ sort.Strings(md.Keys)
+ if !reflect.DeepEqual(md.Keys, expectedKeys) {
+ t.Fatalf("bad keys: %#v", md.Keys)
+ }
+
+ expectedUnused := []string{"Vbar.foo", "bar"}
+ if !reflect.DeepEqual(md.Unused, expectedUnused) {
+ t.Fatalf("bad unused: %#v", md.Unused)
+ }
+}
+
+func TestMetadata_Embedded(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "vstring": "foo",
+ "vunique": "bar",
+ }
+
+ var md Metadata
+ var result EmbeddedSquash
+ config := &DecoderConfig{
+ Metadata: &md,
+ Result: &result,
+ }
+
+ decoder, err := NewDecoder(config)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ }
+
+ err = decoder.Decode(input)
+ if err != nil {
+ t.Fatalf("err: %s", err.Error())
+ }
+
+ expectedKeys := []string{"Vstring", "Vunique"}
+
+ sort.Strings(md.Keys)
+ if !reflect.DeepEqual(md.Keys, expectedKeys) {
+ t.Fatalf("bad keys: %#v", md.Keys)
+ }
+
+ expectedUnused := []string{}
+ if !reflect.DeepEqual(md.Unused, expectedUnused) {
+ t.Fatalf("bad unused: %#v", md.Unused)
+ }
+}
+
+func TestNonPtrValue(t *testing.T) {
+ t.Parallel()
+
+ err := Decode(map[string]interface{}{}, Basic{})
+ if err == nil {
+ t.Fatal("error should exist")
+ }
+
+ if err.Error() != "result must be a pointer" {
+ t.Errorf("got unexpected error: %s", err)
+ }
+}
+
+func TestTagged(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "bar",
+ "bar": "value",
+ }
+
+ var result Tagged
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ if result.Value != "bar" {
+ t.Errorf("value should be 'bar', got: %#v", result.Value)
+ }
+
+ if result.Extra != "value" {
+ t.Errorf("extra should be 'value', got: %#v", result.Extra)
+ }
+}
+
+func TestWeakDecode(t *testing.T) {
+ t.Parallel()
+
+ input := map[string]interface{}{
+ "foo": "4",
+ "bar": "value",
+ }
+
+ var result struct {
+ Foo int
+ Bar string
+ }
+
+ if err := WeakDecode(input, &result); err != nil {
+ t.Fatalf("err: %s", err)
+ }
+ if result.Foo != 4 {
+ t.Fatalf("bad: %#v", result)
+ }
+ if result.Bar != "value" {
+ t.Fatalf("bad: %#v", result)
+ }
+}
+
+func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) {
+ var result Slice
+ err := Decode(input, &result)
+ if err != nil {
+ t.Fatalf("got error: %s", err)
+ }
+
+ if result.Vfoo != expected.Vfoo {
+ t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo)
+ }
+
+ if result.Vbar == nil {
+ t.Fatalf("Vbar a slice, got '%#v'", result.Vbar)
+ }
+
+ if len(result.Vbar) != len(expected.Vbar) {
+ t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar))
+ }
+
+ for i, v := range result.Vbar {
+ if v != expected.Vbar[i] {
+ t.Errorf(
+ "Vbar[%d] should be '%#v', got '%#v'",
+ i, expected.Vbar[i], v)
+ }
+ }
+}
diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 0000000000..75623dcccb
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 0000000000..208fd25bcd
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,18 @@
+# Travis CI (http://travis-ci.org/) is a continuous integration service for
+# open source projects. This file configures it to run unit tests for
+# blackfriday.
+
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+install:
+ - go get -d -t -v ./...
+ - go build -v ./...
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 0000000000..2885af3602
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 0000000000..7650ce44ec
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,267 @@
+Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions. An experimental LaTeX output engine is also included.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with Go 1. If you are using an older
+release of Go, consider using v1.1 of blackfriday, which was based
+on the last stable release of Go prior to Go 1. You can find it as a
+tagged commit on github.
+
+With Go 1 and git installed:
+
+ go get github.com/russross/blackfriday
+
+will download, compile, and install the package into your `$GOPATH`
+directory hierarchy. Alternatively, you can achieve the same if you
+import it into a project:
+
+ import "github.com/russross/blackfriday"
+
+and `go get` without parameters.
+
+Usage
+-----
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+ output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+ output := blackfriday.MarkdownCommon(input)
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running blackfriday's output
+through HTML sanitizer such as
+[Bluemonday](https://github.com/microcosm-cc/bluemonday).
+
+Here's an example of simple usage of blackfriday together with bluemonday:
+
+``` go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "github.com/russross/blackfriday"
+)
+
+// ...
+unsafe := blackfriday.MarkdownCommon(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, first get a renderer
+(currently either the HTML or LaTeX output engines), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <http://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+ the `--tidy` option. Without `--tidy`, the differences are
+ mostly in whitespace and entity escaping, where blackfriday is
+ more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+ to feed untrusted user input without fear of bad things
+ happening. The test suite stress tests this and there are no
+ known inputs that make it crash. If you find one, please let me
+ know and send me the input that does it.
+
+ NOTE: "safety" in this context means *runtime safety only*. In order to
+ protect yourself against JavaScript injection in untrusted content, see
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+ most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+ goroutines without ill effect. There is no dependence on global
+ shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+ library packages in Go. The source code is pretty
+ self-contained, so it is easy to add to any project, including
+ Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+ commonly used inside words when discussing code, so having
+ markdown interpret it as an emphasis command is usually the
+ wrong thing. Blackfriday lets you treat all emphasis markers as
+ normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+ using a simple syntax:
+
+ ```
+ Name | Age
+ --------|------
+ Bob | 27
+ Alice | 23
+ ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+ indentation to mark code blocks, you can explicitly mark them
+ and supply a language (to make syntax highlighting simple). Just
+ mark it like this:
+
+ ``` go
+ func getTrue() bool {
+ return true
+ }
+ ```
+
+ You can use 3 or more backticks to mark the beginning of the
+ block, and the same number to mark the end of the block.
+
+* **Definition lists**. A simple definition list is made of a single-line
+ term followed by a colon and the definition for that term.
+
+ Cat
+ : Fluffy animal everyone likes
+
+ Internet
+ : Vector of transmission for pictures of cats
+
+ Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+ a footnote definition that will be placed in a list of footnotes at the
+ end of the document. A footnote looks like this:
+
+ This is a footnote.[^1]
+
+ [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+ explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+ should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+ default in the `MarkdownBasic` and `MarkdownCommon` convenience
+ functions), newlines in the input translate into line breaks in
+ the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+ supported, turning normal double- and single-quote marks into
+ curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+ is translated into `–`, and `---` is translated into
+ `—`. This differs from most smartypants processors, which
+ turn a single hyphen into an ndash and a double hyphen into an
+ mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypant processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+  <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+ provides a GitHub Flavored Markdown renderer with fenced code block
+ highlighting, clickable header anchor links.
+
+ It's not customizable, and its goal is to produce HTML output
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+ except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+ but for markdown.
+
+* LaTeX output: renders output as LaTeX. This is currently part of the
+ main Blackfriday repository, but may be split into its own project
+ in the future. If you are interested in owning and maintaining the
+ LaTeX output component, please be in touch.
+
+ It renders some basic documents, but is only experimental at this
+ point. In particular, it does not do any inline escaping, so input
+ that happens to look like LaTeX code will be passed through without
+ modification.
+
+* [Md2Vim](https://github.com/FooSoft/md2vim): transforms markdown files into vimdoc format.
+
+
+Todo
+----
+
+* More unit testing
+* Improve unicode support. It does not understand all unicode
+ rules (about what constitutes a letter, a punctuation symbol,
+ etc.), so it may fail to detect word boundaries correctly in
+ some instances. It is safe on all utf-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+ [1]: http://daringfireball.net/projects/markdown/ "Markdown"
+ [2]: http://golang.org/ "Go Language"
+ [3]: https://github.com/vmg/sundown "Sundown"
diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go
new file mode 100644
index 0000000000..740ad46263
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1412 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+
+ "github.com/shurcooL/sanitized_anchor_name"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 || data[len(data)-1] != '\n' {
+		panic("block input is missing terminating newline")
+	}
+
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time; each recognizer
+	// returns the number of bytes it consumed, and we advance past them
+	for len(data) > 0 {
+		// prefixed header:
+		//
+		// # Header 1
+		// ## Header 2
+		// ...
+		// ###### Header 6
+		if p.isPrefixHeader(data) {
+			data = data[p.prefixHeader(out, data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		//     <div>
+		//         ...
+		//     </div>
+		if data[0] == '<' {
+			if i := p.html(out, data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// title block
+		//
+		// % stuff
+		// % more stuff
+		// % even more stuff
+		if p.flags&EXTENSION_TITLEBLOCK != 0 {
+			if data[0] == '%' {
+				if i := p.titleBlock(out, data, true); i > 0 {
+					data = data[i:]
+					continue
+				}
+			}
+		}
+
+		// blank lines. note: returns the # of bytes to skip
+		if i := p.isEmpty(data); i > 0 {
+			data = data[i:]
+			continue
+		}
+
+		// indented code block:
+		//
+		// func max(a, b int) int {
+		// if a > b {
+		// return a
+		// }
+		// return b
+		// }
+		if p.codePrefix(data) > 0 {
+			data = data[p.code(out, data):]
+			continue
+		}
+
+		// fenced code block:
+		//
+		// ``` go
+		// func fact(n int) int {
+		// if n <= 1 {
+		// return n
+		// }
+		// return n * fact(n-1)
+		// }
+		// ```
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if i := p.fencedCode(out, data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// horizontal rule:
+		//
+		// ------
+		// or
+		// ******
+		// or
+		// ______
+		if p.isHRule(data) {
+			p.r.HRule(out)
+			var i int
+			for i = 0; data[i] != '\n'; i++ {
+			}
+			data = data[i:]
+			continue
+		}
+
+		// block quote:
+		//
+		// > A big quote I found somewhere
+		// > on the web
+		if p.quotePrefix(data) > 0 {
+			data = data[p.quote(out, data):]
+			continue
+		}
+
+		// table:
+		//
+		// Name | Age | Phone
+		// ------|-----|---------
+		// Bob | 31 | 555-1234
+		// Alice | 27 | 555-4321
+		if p.flags&EXTENSION_TABLES != 0 {
+			if i := p.table(out, data); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// an itemized/unordered list:
+		//
+		// * Item 1
+		// * Item 2
+		//
+		// also works with + or -
+		if p.uliPrefix(data) > 0 {
+			data = data[p.list(out, data, 0):]
+			continue
+		}
+
+		// a numbered/ordered list:
+		//
+		// 1. Item 1
+		// 2. Item 2
+		if p.oliPrefix(data) > 0 {
+			data = data[p.list(out, data, LIST_TYPE_ORDERED):]
+			continue
+		}
+
+		// definition lists:
+		//
+		// Term 1
+		// : Definition a
+		// : Definition b
+		//
+		// Term 2
+		// : Definition c
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(data) > 0 {
+				data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
+				continue
+			}
+		}
+
+		// anything else must look like a normal paragraph
+		// note: this finds underlined headers, too
+		data = data[p.paragraph(out, data):]
+	}
+
+	p.nesting--
+}
+
+// isPrefixHeader reports whether data begins with an ATX-style header
+// ("#", "##", ...). With EXTENSION_SPACE_HEADERS set, a space is
+// required after the hashes (so "#hashtag" is not a header).
+func (p *parser) isPrefixHeader(data []byte) bool {
+	if data[0] != '#' {
+		return false
+	}
+
+	if p.flags&EXTENSION_SPACE_HEADERS != 0 {
+		level := 0
+		for level < 6 && data[level] == '#' {
+			level++
+		}
+		if data[level] != ' ' {
+			return false
+		}
+	}
+	return true
+}
+
+// prefixHeader renders an ATX header line ("### Title {#id}") and
+// returns the number of bytes consumed. Trailing hashes and spaces are
+// trimmed; an explicit {#id} suffix or an auto-generated anchor is
+// passed to the renderer when the corresponding extensions are enabled.
+func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
+	level := 0
+	for level < 6 && data[level] == '#' {
+		level++
+	}
+	i := skipChar(data, level, ' ')
+	end := skipUntilChar(data, i, '\n')
+	skip := end
+	id := ""
+	if p.flags&EXTENSION_HEADER_IDS != 0 {
+		j, k := 0, 0
+		// find start/end of header id
+		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+		}
+		for k = j + 1; k < end && data[k] != '}'; k++ {
+		}
+		// extract header id iff found
+		if j < end && k < end {
+			id = string(data[j+2 : k])
+			end = j
+			skip = k + 1
+			for end > 0 && data[end-1] == ' ' {
+				end--
+			}
+		}
+	}
+	// strip trailing hashes unless backslash-escaped, then trailing spaces
+	for end > 0 && data[end-1] == '#' {
+		if isBackslashEscaped(data, end-1) {
+			break
+		}
+		end--
+	}
+	for end > 0 && data[end-1] == ' ' {
+		end--
+	}
+	if end > i {
+		if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+			id = sanitized_anchor_name.Create(string(data[i:end]))
+		}
+		work := func() bool {
+			p.inline(out, data[i:end])
+			return true
+		}
+		p.r.Header(out, work, level, id)
+	}
+	return skip
+}
+
+// isUnderlinedHeader checks whether data starts with a setext header
+// underline: returns 1 for "===" (level-1), 2 for "---" (level-2), and
+// 0 when the line is not a valid underline.
+func (p *parser) isUnderlinedHeader(data []byte) int {
+	// test of level 1 header
+	if data[0] == '=' {
+		i := skipChar(data, 1, '=')
+		i = skipChar(data, i, ' ')
+		if data[i] == '\n' {
+			return 1
+		} else {
+			return 0
+		}
+	}
+
+	// test of level 2 header
+	if data[0] == '-' {
+		i := skipChar(data, 1, '-')
+		i = skipChar(data, i, ' ')
+		if data[i] == '\n' {
+			return 2
+		} else {
+			return 0
+		}
+	}
+
+	return 0
+}
+
+// titleBlock consumes a run of leading lines that all start with '%'
+// (a Pandoc-style title block) and hands them to the renderer.
+// Returns the number of bytes consumed, 0 if data is not a title block.
+// NOTE(review): the doRender parameter is accepted but never consulted
+// here (rendering always happens) — confirm against callers.
+func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '%' {
+		return 0
+	}
+	splitData := bytes.Split(data, []byte("\n"))
+	var i int
+	for idx, b := range splitData {
+		if !bytes.HasPrefix(b, []byte("%")) {
+			i = idx // - 1
+			break
+		}
+	}
+
+	data = bytes.Join(splitData[0:i], []byte("\n"))
+	p.r.TitleBlock(out, data)
+
+	return len(data)
+}
+
+// html tries to consume a block of raw HTML starting at data[0] == '<'.
+// It recognizes known block tags (plus comments, <hr>, and CDATA as
+// special cases) and returns the number of bytes consumed, or 0 when
+// data does not start a valid HTML block.
+func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
+	var i, j int
+
+	// identify the opening tag
+	if data[0] != '<' {
+		return 0
+	}
+	curtag, tagfound := p.htmlFindTag(data[1:])
+
+	// handle special cases
+	if !tagfound {
+		// check for an HTML comment
+		if size := p.htmlComment(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for an <hr> tag
+		if size := p.htmlHr(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for HTML CDATA
+		if size := p.htmlCDATA(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	// NOTE(review): this first pass is commented out in the vendored
+	// source; only the indented-match pass below runs.
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+
+	return i
+}
+
+// renderHTMLBlock finishes an HTML block that is known to end at
+// offset start: the block is only accepted if it is followed by a
+// blank line. Returns total bytes consumed (including the blank line),
+// or 0 if the block is not properly terminated.
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+	// html block needs to end with a blank line
+	if i := p.isEmpty(data[start:]); i > 0 {
+		size := start + i
+		if doRender {
+			// trim trailing newlines
+			end := size
+			for end > 0 && data[end-1] == '\n' {
+				end--
+			}
+			p.r.BlockHtml(out, data[:end])
+		}
+		return size
+	}
+	return 0
+}
+
+// HTML comment, lax form: delegate scanning to the inline comment
+// parser, then require the usual trailing blank line via
+// renderHTMLBlock. Returns bytes consumed or 0.
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(out, data)
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section: recognizes a block starting with "<![CDATA["
+// (case-insensitive) and scans for the closing "]]>" marker, across
+// lines if necessary. Returns bytes consumed via renderHTMLBlock, or 0.
+//
+// NOTE(review): this function was corrupted in the patch — the literals
+// containing '<' were stripped by extraction tooling, leaving
+// `const cdataTag = "') {`. Reconstructed from upstream blackfriday
+// block.go; confirm against the vendored commit.
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+	const cdataTag = "<![cdata["
+	const cdataTagLen = len(cdataTag)
+	if len(data) < cdataTagLen+1 {
+		return 0
+	}
+	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+		return 0
+	}
+	i := cdataTagLen
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
+		i++
+	}
+	i++
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered.
+// Accepts "<hr", "<HR", etc., followed by ' ', '/' or '>', scanning to
+// the closing '>' on the same line; returns bytes consumed or 0.
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+
+	i := 3
+	for data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+
+	if data[i] == '>' {
+		return p.renderHTMLBlock(out, data, i+1, doRender)
+	}
+
+	return 0
+}
+
+// htmlFindTag reads the leading alphanumeric tag name from data (which
+// starts just past the '<') and reports whether it is a known block
+// tag. Returns the tag name and a found flag.
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+	i := 0
+	for isalnum(data[i]) {
+		i++
+	}
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+// htmlFindEnd checks whether data begins with the closing tag
+// "</tag>" followed by (up to two) blank lines, and returns the number
+// of bytes consumed, or 0 if it does not match.
+//
+// Fix: the patch had lost the "</" inside the closetag literal
+// (stripped as an HTML tag), so the function matched "tag>" anywhere
+// instead of a real closing tag; restored per upstream blackfriday.
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	// lax blocks need only the one blank line
+	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+// isEmpty returns the length of the leading blank line in data,
+// including its newline, or 0 when the first line contains anything
+// other than spaces and tabs.
+func (p *parser) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	return i + 1
+}
+
+// isHRule reports whether the first line of data is a horizontal rule:
+// up to three leading spaces, then at least three of the same
+// character ('*', '-', or '_') mixed only with spaces.
+func (p *parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFencedCode tests whether data begins with a code-fence delimiter
+// line (three or more '~' or '`'). It returns the number of bytes to
+// skip past the delimiter line (0 if not a fence) and the marker
+// string. When syntax is non-nil, the language tag (bare word or
+// "{...}" form) is stored through it. When oldmarker is non-empty, the
+// fence must match it exactly (used to find a block's closing fence).
+func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) {
+	i, size := 0, 0
+	skip = 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+	if i >= len(data) {
+		return
+	}
+
+	// check for the marker characters: ~ or `
+	if data[i] != '~' && data[i] != '`' {
+		return
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	if i >= len(data) {
+		return
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return
+	}
+
+	if syntax != nil {
+		syn := 0
+		i = skipChar(data, i, ' ')
+
+		if i >= len(data) {
+			return
+		}
+
+		syntaxStart := i
+
+		if data[i] == '{' {
+			i++
+			syntaxStart++
+
+			for i < len(data) && data[i] != '}' && data[i] != '\n' {
+				syn++
+				i++
+			}
+
+			if i >= len(data) || data[i] != '}' {
+				return
+			}
+
+			// strip all whitespace at the beginning and the end
+			// of the {} block
+			for syn > 0 && isspace(data[syntaxStart]) {
+				syntaxStart++
+				syn--
+			}
+
+			for syn > 0 && isspace(data[syntaxStart+syn-1]) {
+				syn--
+			}
+
+			i++
+		} else {
+			for i < len(data) && !isspace(data[i]) {
+				syn++
+				i++
+			}
+		}
+
+		language := string(data[syntaxStart : syntaxStart+syn])
+		*syntax = &language
+	}
+
+	// only trailing spaces may follow the fence on its line
+	i = skipChar(data, i, ' ')
+	if i >= len(data) || data[i] != '\n' {
+		return
+	}
+
+	skip = i + 1
+	return
+}
+
+// fencedCode consumes a complete fenced code block (opening fence,
+// verbatim body, matching closing fence) and, when doRender is true,
+// emits it through the renderer with its language tag. Returns bytes
+// consumed, or 0 when no properly closed fence starts at data[0].
+func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
+	var lang *string
+	beg, marker := p.isFencedCode(data, &lang, "")
+	if beg == 0 || beg >= len(data) {
+		return 0
+	}
+
+	var work bytes.Buffer
+
+	for {
+		// safe to assume beg < len(data)
+
+		// check for the end of the code block
+		fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker)
+		if fenceEnd != 0 {
+			beg += fenceEnd
+			break
+		}
+
+		// copy the current line
+		end := skipUntilChar(data, beg, '\n') + 1
+
+		// did we reach the end of the buffer without a closing marker?
+		if end >= len(data) {
+			return 0
+		}
+
+		// verbatim copy to the working buffer
+		if doRender {
+			work.Write(data[beg:end])
+		}
+		beg = end
+	}
+
+	syntax := ""
+	if lang != nil {
+		syntax = *lang
+	}
+
+	if doRender {
+		p.r.BlockCode(out, work.Bytes(), syntax)
+	}
+
+	return beg
+}
+
+// table consumes a pipe table: a header (validated by tableHeader),
+// then body rows for as long as each line contains at least one '|'.
+// Returns bytes consumed, or 0 when data does not start a table.
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+	var header bytes.Buffer
+	i, columns := p.tableHeader(&header, data)
+	if i == 0 {
+		return 0
+	}
+
+	var body bytes.Buffer
+
+	for i < len(data) {
+		pipes, rowStart := 0, i
+		for ; data[i] != '\n'; i++ {
+			if data[i] == '|' {
+				pipes++
+			}
+		}
+
+		// a line without pipes ends the table; rewind to its start
+		if pipes == 0 {
+			i = rowStart
+			break
+		}
+
+		// include the newline in data sent to tableRow
+		i++
+		p.tableRow(&body, data[rowStart:i], columns, false)
+	}
+
+	p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+	return i
+}
+
+// check if the specified position is preceded by an odd number of
+// backslashes (i.e. data[i] itself is escaped)
+func isBackslashEscaped(data []byte, i int) bool {
+	backslashes := 0
+	for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+		backslashes++
+	}
+	return backslashes&1 == 1
+}
+
+// tableHeader validates and renders the first two lines of a pipe
+// table: the header row and the dash/colon underline that fixes the
+// column count and per-column alignment flags. Returns the bytes
+// consumed and the alignment slice; size == 0 means "not a table".
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+	i := 0
+	colCount := 1
+	for i = 0; data[i] != '\n'; i++ {
+		if data[i] == '|' && !isBackslashEscaped(data, i) {
+			colCount++
+		}
+	}
+
+	// doesn't look like a table header
+	if colCount == 1 {
+		return
+	}
+
+	// include the newline in the data sent to tableRow
+	header := data[:i+1]
+
+	// column count ignores pipes at beginning or end of line
+	if data[0] == '|' {
+		colCount--
+	}
+	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+		colCount--
+	}
+
+	columns = make([]int, colCount)
+
+	// move on to the header underline
+	i++
+	if i >= len(data) {
+		return
+	}
+
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+	i = skipChar(data, i, ' ')
+
+	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+	// and trailing | optional on last column
+	col := 0
+	for data[i] != '\n' {
+		dashes := 0
+
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_LEFT
+			dashes++
+		}
+		for data[i] == '-' {
+			i++
+			dashes++
+		}
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_RIGHT
+			dashes++
+		}
+		for data[i] == ' ' {
+			i++
+		}
+
+		// end of column test is messy
+		switch {
+		case dashes < 3:
+			// not a valid column
+			return
+
+		case data[i] == '|' && !isBackslashEscaped(data, i):
+			// marker found, now skip past trailing whitespace
+			col++
+			i++
+			for data[i] == ' ' {
+				i++
+			}
+
+			// trailing junk found after last column
+			if col >= colCount && data[i] != '\n' {
+				return
+			}
+
+		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+			// something else found where marker was required
+			return
+
+		case data[i] == '\n':
+			// marker is optional for the last column
+			col++
+
+		default:
+			// trailing junk found after last column
+			return
+		}
+	}
+	if col != colCount {
+		return
+	}
+
+	p.tableRow(out, header, columns, true)
+	size = i + 1
+	return
+}
+
+// tableRow renders one table line (which includes its trailing
+// newline) as len(columns) cells, running inline parsing on each cell.
+// Missing trailing cells are padded with empty ones; extra cells are
+// silently dropped. header selects header-cell vs body-cell rendering.
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+	i, col := 0, 0
+	var rowWork bytes.Buffer
+
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+
+	for col = 0; col < len(columns) && i < len(data); col++ {
+		for data[i] == ' ' {
+			i++
+		}
+
+		cellStart := i
+
+		for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+			i++
+		}
+
+		cellEnd := i
+
+		// skip the end-of-cell marker, possibly taking us past end of buffer
+		i++
+
+		for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+			cellEnd--
+		}
+
+		var cellWork bytes.Buffer
+		p.inline(&cellWork, data[cellStart:cellEnd])
+
+		if header {
+			p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+		} else {
+			p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+		}
+	}
+
+	// pad it out with empty columns to get the right number
+	for ; col < len(columns); col++ {
+		if header {
+			p.r.TableHeaderCell(&rowWork, nil, columns[col])
+		} else {
+			p.r.TableCell(&rowWork, nil, columns[col])
+		}
+	}
+
+	// silently ignore rows with too many cells
+
+	p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length: up to three spaces, a '>', and
+// one optional following space; 0 when data is not a quote line
+func (p *parser) quotePrefix(data []byte) int {
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+	if data[i] == '>' {
+		if data[i+1] == ' ' {
+			return i + 2
+		}
+		return i + 1
+	}
+	return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+// (beg indexes the current blank line, end the following line)
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+	if p.isEmpty(data[beg:]) <= 0 {
+		return false
+	}
+	if end >= len(data) {
+		return true
+	}
+	return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment: strip the "> " prefixes into raw,
+// recursively block-parse the stripped text, and render the result.
+// Returns the number of bytes consumed.
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+	var raw bytes.Buffer
+	beg, end := 0, 0
+	for beg < len(data) {
+		end = beg
+		// Step over whole lines, collecting them. While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCode(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// returns prefix length for block code (4 leading spaces), 0 otherwise
+// NOTE(review): reads data[0..3] unconditionally — callers appear to
+// guarantee a newline-terminated buffer; confirm before reuse.
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+// code consumes an indented code block: lines with a 4-space prefix
+// (blank lines are kept as bare newlines) until the first non-blank,
+// non-indented line. Trailing newlines are collapsed to exactly one
+// before rendering. Returns bytes consumed.
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// returns unordered list item prefix length ("* ", "+ ", or "- " after
+// up to three spaces), 0 when data is not an unordered item
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix length (digits, '.', space, after
+// up to three leading spaces), 0 when data is not an ordered item
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix length (": " at the start of
+// the line), 0 otherwise
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a : followed by a spaces
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	// NOTE(review): this loop never runs — data[i] is ':' here, not ' ',
+	// so i stays 0 and the function always returns 2. Possibly intended
+	// to skip leading spaces before the ':'; confirm against upstream.
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block: consumes consecutive list items
+// starting at data[0], renders the whole list via the renderer's List
+// callback, and returns the number of input bytes consumed.
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	// work is invoked by the renderer; it emits every item into out and
+	// advances i past the consumed input as a side effect.
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			// stop when listItem consumed nothing or flagged end-of-list
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			// only the first item carries the beginning-of-list flag
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+// Renders the item via the renderer's ListItem callback, may set/clear
+// bits in *flags (LIST_ITEM_CONTAINS_BLOCK, LIST_ITEM_END_OF_LIST,
+// LIST_TYPE_TERM), and returns the number of input bytes consumed
+// (0 when data does not start a list item).
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+	// keep track of the indentation of the first line
+	itemIndent := 0
+	for itemIndent < 3 && data[itemIndent] == ' ' {
+		itemIndent++
+	}
+
+	// try each marker type in turn: unordered, ordered, definition
+	i := p.uliPrefix(data)
+	if i == 0 {
+		i = p.oliPrefix(data)
+	}
+	if i == 0 {
+		i = p.dliPrefix(data)
+		// reset definition term flag
+		if i > 0 {
+			*flags &= ^LIST_TYPE_TERM
+		}
+	}
+	if i == 0 {
+		// if in definition list, set term flag and continue
+		// (a non-marker line inside a definition list is a term)
+		if *flags&LIST_TYPE_DEFINITION != 0 {
+			*flags |= LIST_TYPE_TERM
+		} else {
+			return 0
+		}
+	}
+
+	// skip leading whitespace on first line
+	for data[i] == ' ' {
+		i++
+	}
+
+	// find the end of the line
+	line := i
+	for i > 0 && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[line:i])
+	line = i
+
+	// process the following lines
+	containsBlankLine := false
+	// sublist marks the offset in raw where a nested list begins (0 = none)
+	sublist := 0
+
+gatherlines:
+	for line < len(data) {
+		i++
+
+		// find the end of this line
+		for data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[line:i]) > 0 {
+			containsBlankLine = true
+			raw.Write(data[line:i])
+			line = i
+			continue
+		}
+
+		// calculate the indentation (capped at 4 spaces)
+		indent := 0
+		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+			indent++
+		}
+
+		chunk := data[line+indent : i]
+
+		// evaluate how this line fits in
+		switch {
+		// is this a nested list item?
+		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+			p.oliPrefix(chunk) > 0 ||
+			p.dliPrefix(chunk) > 0:
+
+			if containsBlankLine {
+				*flags |= LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			// to be a nested list, it must be indented more
+			// if not, it is the next item in the same list
+			if indent <= itemIndent {
+				break gatherlines
+			}
+
+			// is this the first item in the nested list?
+			if sublist == 0 {
+				sublist = raw.Len()
+			}
+
+		// is this a nested prefix header?
+		case p.isPrefixHeader(chunk):
+			// if the header is not indented, it is not nested in the list
+			// and thus ends the list
+			if containsBlankLine && indent < 4 {
+				*flags |= LIST_ITEM_END_OF_LIST
+				break gatherlines
+			}
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+
+		// anything following an empty line is only part
+		// of this item if it is indented 4 spaces
+		// (regardless of the indentation of the beginning of the item)
+		case containsBlankLine && indent < 4:
+			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+				// is the next item still a part of this list?
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		containsBlankLine = false
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			// render the item text and its nested sublist separately
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// render a single paragraph that has already been parsed out:
+// trims surrounding whitespace and emits via the renderer's
+// Paragraph callback. data is expected to end with a newline.
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// trim trailing newline
+	end := len(data) - 1
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	// deferred inline render, invoked by the renderer callback
+	work := func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	}
+	p.r.Paragraph(out, work)
+}
+
+// paragraph scans forward from the start of data until it finds something
+// that terminates the paragraph (blank line, setext underline, HTML block,
+// header, rule, fenced code, or list start depending on enabled
+// extensions), renders the paragraph, and returns the bytes consumed.
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
+			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+				if i < len(data)-1 && data[i+1] == ':' {
+					// re-parse from the previous line as a definition list
+					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+				}
+			}
+
+			p.renderParagraph(out, data[:i])
+			return i + n
+		}
+
+		// an underline under some text marks a header, so our paragraph ended on prev line
+		if i > 0 {
+			if level := p.isUnderlinedHeader(current); level > 0 {
+				// render the paragraph (everything before the header text)
+				p.renderParagraph(out, data[:prev])
+
+				// ignore leading and trailing whitespace
+				eol := i - 1
+				for prev < eol && data[prev] == ' ' {
+					prev++
+				}
+				for eol > prev && data[eol-1] == ' ' {
+					eol--
+				}
+
+				// render the header
+				// this ugly double closure avoids forcing variables onto the heap
+				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+					return func() bool {
+						pp.inline(o, d)
+						return true
+					}
+				}(out, p, data[prev:eol])
+
+				id := ""
+				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+					id = sanitized_anchor_name.Create(string(data[prev:eol]))
+				}
+
+				p.r.Header(out, work, level, id)
+
+				// find the end of the underline
+				for data[i] != '\n' {
+					i++
+				}
+				return i
+			}
+		}
+
+		// if the next line starts a block of HTML, then the paragraph ends here
+		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+			if data[i] == '<' && p.html(out, current, false) > 0 {
+				// rewind to before the HTML block
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a prefixed header or a horizontal rule after this, paragraph is over
+		if p.isPrefixHeader(current) || p.isHRule(current) {
+			p.renderParagraph(out, data[:i])
+			return i
+		}
+
+		// if there's a fenced code block, paragraph is over
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			// doRender=false: probe only, nothing is emitted here
+			if p.fencedCode(out, current, false) > 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a definition list item, prev line is a definition term
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(current) != 0 {
+				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+			}
+		}
+
+		// if there's a list after this, paragraph is over
+		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+			if p.uliPrefix(current) != 0 ||
+				p.oliPrefix(current) != 0 ||
+				p.quotePrefix(current) != 0 ||
+				p.codePrefix(current) != 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// otherwise, scan to the beginning of the next line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+	}
+
+	// ran out of input: render whatever we accumulated
+	p.renderParagraph(out, data[:i])
+	return i
+}
diff --git a/vendor/github.com/russross/blackfriday/block_test.go b/vendor/github.com/russross/blackfriday/block_test.go
new file mode 100644
index 0000000000..f59268ee0b
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block_test.go
@@ -0,0 +1,1621 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross .
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Unit tests for block parsing
+//
+
+package blackfriday
+
+import (
+ "strings"
+ "testing"
+)
+
+// runMarkdownBlockWithRenderer processes input through Markdown with the
+// given extensions and renderer, returning the rendered output as a string.
+func runMarkdownBlockWithRenderer(input string, extensions int, renderer Renderer) string {
+	return string(Markdown([]byte(input), renderer, extensions))
+}
+
+// runMarkdownBlock renders input using the default XHTML renderer with the
+// given parser extensions; the standard test runner for this file.
+func runMarkdownBlock(input string, extensions int) string {
+	htmlFlags := 0
+	htmlFlags |= HTML_USE_XHTML
+
+	renderer := HtmlRenderer(htmlFlags, "", "")
+
+	return runMarkdownBlockWithRenderer(input, extensions, renderer)
+}
+
+// runnerWithRendererParameters returns a test-runner closure equivalent to
+// runMarkdownBlock, but with custom HTML renderer parameters (e.g. header
+// ID prefix/suffix) applied.
+func runnerWithRendererParameters(parameters HtmlRendererParameters) func(string, int) string {
+	return func(input string, extensions int) string {
+		htmlFlags := 0
+		htmlFlags |= HTML_USE_XHTML
+
+		renderer := HtmlRendererWithParameters(htmlFlags, "", "", parameters)
+
+		return runMarkdownBlockWithRenderer(input, extensions, renderer)
+	}
+}
+
+// doTestsBlock runs the input/expected-output pairs in tests through the
+// default runner with the given extensions.
+func doTestsBlock(t *testing.T, tests []string, extensions int) {
+	doTestsBlockWithRunner(t, tests, extensions, runMarkdownBlock)
+}
+
+// doTestsBlockWithRunner checks each (input, expected) pair in tests using
+// the supplied runner. tests alternates input and expected output. Unless
+// -short is set, it additionally feeds every substring of each input
+// through the default runner to stress-test bounds checking (output of
+// those runs is discarded; only panics are detected).
+func doTestsBlockWithRunner(t *testing.T, tests []string, extensions int, runner func(string, int) string) {
+	// catch and report panics
+	var candidate string
+	defer func() {
+		if err := recover(); err != nil {
+			t.Errorf("\npanic while processing [%#v]: %s\n", candidate, err)
+		}
+	}()
+
+	for i := 0; i+1 < len(tests); i += 2 {
+		input := tests[i]
+		candidate = input
+		expected := tests[i+1]
+		actual := runner(candidate, extensions)
+		if actual != expected {
+			t.Errorf("\nInput   [%#v]\nExpected[%#v]\nActual  [%#v]",
+				candidate, expected, actual)
+		}
+
+		// now test every substring to stress test bounds checking
+		if !testing.Short() {
+			for start := 0; start < len(input); start++ {
+				for end := start + 1; end <= len(input); end++ {
+					candidate = input[start:end]
+					_ = runMarkdownBlock(candidate, extensions)
+				}
+			}
+		}
+	}
+}
+
+func TestPrefixHeaderNoExtensions(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1 \n",
+
+ "## Header 2\n",
+ "Header 2 \n",
+
+ "### Header 3\n",
+ "Header 3 \n",
+
+ "#### Header 4\n",
+ "Header 4 \n",
+
+ "##### Header 5\n",
+ "Header 5 \n",
+
+ "###### Header 6\n",
+ "Header 6 \n",
+
+ "####### Header 7\n",
+ "# Header 7 \n",
+
+ "#Header 1\n",
+ "Header 1 \n",
+
+ "##Header 2\n",
+ "Header 2 \n",
+
+ "###Header 3\n",
+ "Header 3 \n",
+
+ "####Header 4\n",
+ "Header 4 \n",
+
+ "#####Header 5\n",
+ "Header 5 \n",
+
+ "######Header 6\n",
+ "Header 6 \n",
+
+ "#######Header 7\n",
+ "#Header 7 \n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Hello
\n\nHeader 1 \n\nGoodbye
\n",
+
+ "* List\n# Header\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n#Header\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ "Nested header \n \n \n",
+
+ "#Header 1 \\#\n",
+ "Header 1 # \n",
+
+ "#Header 1 \\# foo\n",
+ "Header 1 # foo \n",
+
+ "#Header 1 #\\##\n",
+ "Header 1 ## \n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestPrefixHeaderSpaceExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "Header 1 \n",
+
+ "## Header 2\n",
+ "Header 2 \n",
+
+ "### Header 3\n",
+ "Header 3 \n",
+
+ "#### Header 4\n",
+ "Header 4 \n",
+
+ "##### Header 5\n",
+ "Header 5 \n",
+
+ "###### Header 6\n",
+ "Header 6 \n",
+
+ "####### Header 7\n",
+ "####### Header 7
\n",
+
+ "#Header 1\n",
+ "#Header 1
\n",
+
+ "##Header 2\n",
+ "##Header 2
\n",
+
+ "###Header 3\n",
+ "###Header 3
\n",
+
+ "####Header 4\n",
+ "####Header 4
\n",
+
+ "#####Header 5\n",
+ "#####Header 5
\n",
+
+ "######Header 6\n",
+ "######Header 6
\n",
+
+ "#######Header 7\n",
+ "#######Header 7
\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Hello
\n\nHeader 1 \n\nGoodbye
\n",
+
+ "* List\n# Header\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n#Header\n* List\n",
+ "\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ "Nested header \n \n \n",
+ }
+ doTestsBlock(t, tests, EXTENSION_SPACE_HEADERS)
+}
+
+func TestPrefixHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1 {#someid}\n",
+ "Header 1 \n",
+
+ "# Header 1 {#someid} \n",
+ "Header 1 \n",
+
+ "# Header 1 {#someid}\n",
+ "Header 1 \n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid \n",
+
+ "# Header 1 {#someid\n",
+ "Header 1 {#someid \n",
+
+ "# Header 1 {#someid}}\n",
+ "Header 1 \n\n}
\n",
+
+ "## Header 2 {#someid}\n",
+ "Header 2 \n",
+
+ "### Header 3 {#someid}\n",
+ "Header 3 \n",
+
+ "#### Header 4 {#someid}\n",
+ "Header 4 \n",
+
+ "##### Header 5 {#someid}\n",
+ "Header 5 \n",
+
+ "###### Header 6 {#someid}\n",
+ "Header 6 \n",
+
+ "####### Header 7 {#someid}\n",
+ "# Header 7 \n",
+
+ "# Header 1 # {#someid}\n",
+ "Header 1 \n",
+
+ "## Header 2 ## {#someid}\n",
+ "Header 2 \n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Hello
\n\nHeader 1 \n\nGoodbye
\n",
+
+ "* List\n# Header {#someid}\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ "Nested header \n \n \n",
+ }
+ doTestsBlock(t, tests, EXTENSION_HEADER_IDS)
+}
+
+func TestPrefixHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# header 1 {#someid}\n",
+ "header 1 \n",
+
+ "## header 2 {#someid}\n",
+ "header 2 \n",
+
+ "### header 3 {#someid}\n",
+ "header 3 \n",
+
+ "#### header 4 {#someid}\n",
+ "header 4 \n",
+
+ "##### header 5 {#someid}\n",
+ "header 5 \n",
+
+ "###### header 6 {#someid}\n",
+ "header 6 \n",
+
+ "####### header 7 {#someid}\n",
+ "# header 7 \n",
+
+ "# header 1 # {#someid}\n",
+ "header 1 \n",
+
+ "## header 2 ## {#someid}\n",
+ "header 2 \n",
+
+ "* List\n# Header {#someid}\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n#Header {#someid}\n* List\n",
+ "\nList
\n\nHeader \n\nList
\n \n",
+
+ "* List\n * Nested list\n # Nested header {#someid}\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ "Nested header \n \n \n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixAutoHeaderIdExtension(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "\n",
+
+ "# Header 1 \n",
+ "\n",
+
+ "## Header 2\n",
+ "\n",
+
+ "### Header 3\n",
+ "\n",
+
+ "#### Header 4\n",
+ "\n",
+
+ "##### Header 5\n",
+ "\n",
+
+ "###### Header 6\n",
+ "\n",
+
+ "####### Header 7\n",
+ "\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Hello
\n\n\n\nGoodbye
\n",
+
+ "* List\n# Header\n* List\n",
+ "\n",
+
+ "* List\n#Header\n* List\n",
+ "\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ " \n \n \n",
+
+ "# Header\n\n# Header\n",
+ "\n\n\n",
+
+ "# Header 1\n\n# Header 1",
+ "\n\n\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "\n\n\n\n\n\n\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestPrefixAutoHeaderIdExtensionWithPrefixAndSuffix(t *testing.T) {
+ var tests = []string{
+ "# Header 1\n",
+ "\n",
+
+ "# Header 1 \n",
+ "\n",
+
+ "## Header 2\n",
+ "\n",
+
+ "### Header 3\n",
+ "\n",
+
+ "#### Header 4\n",
+ "\n",
+
+ "##### Header 5\n",
+ "\n",
+
+ "###### Header 6\n",
+ "\n",
+
+ "####### Header 7\n",
+ "\n",
+
+ "Hello\n# Header 1\nGoodbye\n",
+ "Hello
\n\n\n\nGoodbye
\n",
+
+ "* List\n# Header\n* List\n",
+ "\n",
+
+ "* List\n#Header\n* List\n",
+ "\n",
+
+ "* List\n * Nested list\n # Nested header\n",
+ "\nList
\n\n\nNested list
\n\n" +
+ " \n \n \n",
+
+ "# Header\n\n# Header\n",
+ "\n\n\n",
+
+ "# Header 1\n\n# Header 1",
+ "\n\n\n",
+
+ "# Header\n\n# Header 1\n\n# Header\n\n# Header",
+ "\n\n\n\n\n\n\n",
+ }
+
+ parameters := HtmlRendererParameters{
+ HeaderIDPrefix: "PRE:",
+ HeaderIDSuffix: ":POST",
+ }
+
+ doTestsBlockWithRunner(t, tests, EXTENSION_AUTO_HEADER_IDS, runnerWithRendererParameters(parameters))
+}
+
+func TestPrefixMultipleHeaderExtensions(t *testing.T) {
+ var tests = []string{
+ "# Header\n\n# Header {#header}\n\n# Header 1",
+ "\n\n\n\n\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS|EXTENSION_HEADER_IDS)
+}
+
+func TestUnderlineHeaders(t *testing.T) {
+ var tests = []string{
+ "Header 1\n========\n",
+ "Header 1 \n",
+
+ "Header 2\n--------\n",
+ "Header 2 \n",
+
+ "A\n=\n",
+ "A \n",
+
+ "B\n-\n",
+ "B \n",
+
+ "Paragraph\nHeader\n=\n",
+ "Paragraph
\n\nHeader \n",
+
+ "Header\n===\nParagraph\n",
+ "Header \n\nParagraph
\n",
+
+ "Header\n===\nAnother header\n---\n",
+ "Header \n\nAnother header \n",
+
+ " Header\n======\n",
+ "Header \n",
+
+ " Code\n========\n",
+ "Code\n
\n\n========
\n",
+
+ "Header with *inline*\n=====\n",
+ "Header with inline \n",
+
+ "* List\n * Sublist\n Not a header\n ------\n",
+ "\nList\n\n\nSublist\nNot a header\n------ \n \n \n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "Paragraph
\n\nHeader \n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space \n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces \n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline \n\n=====
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestUnderlineHeadersAutoIDs(t *testing.T) {
+ var tests = []string{
+ "Header 1\n========\n",
+ "\n",
+
+ "Header 2\n--------\n",
+ "\n",
+
+ "A\n=\n",
+ "A \n",
+
+ "B\n-\n",
+ "B \n",
+
+ "Paragraph\nHeader\n=\n",
+ "Paragraph
\n\n\n",
+
+ "Header\n===\nParagraph\n",
+ "\n\nParagraph
\n",
+
+ "Header\n===\nAnother header\n---\n",
+ "\n\n\n",
+
+ " Header\n======\n",
+ "\n",
+
+ "Header with *inline*\n=====\n",
+ "\n",
+
+ "Paragraph\n\n\n\n\nHeader\n===\n",
+ "Paragraph
\n\n\n",
+
+ "Trailing space \n==== \n\n",
+ "Trailing space \n",
+
+ "Trailing spaces\n==== \n\n",
+ "Trailing spaces \n",
+
+ "Double underline\n=====\n=====\n",
+ "Double underline \n\n=====
\n",
+
+ "Header\n======\n\nHeader\n======\n",
+ "\n\n\n",
+
+ "Header 1\n========\n\nHeader 1\n========\n",
+ "\n\n\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_AUTO_HEADER_IDS)
+}
+
+func TestHorizontalRule(t *testing.T) {
+ var tests = []string{
+ "-\n",
+ "-
\n",
+
+ "--\n",
+ "--
\n",
+
+ "---\n",
+ " \n",
+
+ "----\n",
+ " \n",
+
+ "*\n",
+ "*
\n",
+
+ "**\n",
+ "**
\n",
+
+ "***\n",
+ " \n",
+
+ "****\n",
+ " \n",
+
+ "_\n",
+ "_
\n",
+
+ "__\n",
+ "__
\n",
+
+ "___\n",
+ " \n",
+
+ "____\n",
+ " \n",
+
+ "-*-\n",
+ "-*-
\n",
+
+ "- - -\n",
+ " \n",
+
+ "* * *\n",
+ " \n",
+
+ "_ _ _\n",
+ " \n",
+
+ "-----*\n",
+ "-----*
\n",
+
+ " ------ \n",
+ " \n",
+
+ "Hello\n***\n",
+ "Hello
\n\n \n",
+
+ "---\n***\n___\n",
+ " \n\n \n\n \n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestUnorderedList(t *testing.T) {
+ var tests = []string{
+ "* Hello\n",
+ "\n",
+
+ "* Yin\n* Yang\n",
+ "\n",
+
+ "* Ting\n* Bong\n* Goo\n",
+ "\n",
+
+ "* Yin\n\n* Yang\n",
+ "\n",
+
+ "* Ting\n\n* Bong\n* Goo\n",
+ "\n",
+
+ "+ Hello\n",
+ "\n",
+
+ "+ Yin\n+ Yang\n",
+ "\n",
+
+ "+ Ting\n+ Bong\n+ Goo\n",
+ "\n",
+
+ "+ Yin\n\n+ Yang\n",
+ "\n",
+
+ "+ Ting\n\n+ Bong\n+ Goo\n",
+ "\n",
+
+ "- Hello\n",
+ "\n",
+
+ "- Yin\n- Yang\n",
+ "\n",
+
+ "- Ting\n- Bong\n- Goo\n",
+ "\n",
+
+ "- Yin\n\n- Yang\n",
+ "\n",
+
+ "- Ting\n\n- Bong\n- Goo\n",
+ "\n",
+
+ "*Hello\n",
+ "*Hello
\n",
+
+ "* Hello \n",
+ "\n",
+
+ "* Hello \n Next line \n",
+ "\n",
+
+ "Paragraph\n* No linebreak\n",
+ "Paragraph\n* No linebreak
\n",
+
+ "Paragraph\n\n* Linebreak\n",
+ "Paragraph
\n\n\n",
+
+ "* List\n * Nested list\n",
+ "\n",
+
+ "* List\n\n * Nested list\n",
+ "\n",
+
+ "* List\n Second line\n\n + Nested\n",
+ "\nList\nSecond line
\n\n \n \n",
+
+ "* List\n + Nested\n\n Continued\n",
+ "\nList
\n\n\n\nContinued
\n \n",
+
+ "* List\n * shallow indent\n",
+ "\n",
+
+ "* List\n" +
+ " * shallow indent\n" +
+ " * part of second list\n" +
+ " * still second\n" +
+ " * almost there\n" +
+ " * third level\n",
+ "\n" +
+ "List\n\n" +
+ "\n" +
+ "shallow indent \n" +
+ "part of second list \n" +
+ "still second \n" +
+ "almost there\n\n" +
+ "\n" +
+ "third level \n" +
+ " \n" +
+ " \n" +
+ " \n",
+
+ "* List\n extra indent, same paragraph\n",
+ "\nList\n extra indent, same paragraph \n \n",
+
+ "* List\n\n code block\n",
+ "\n",
+
+ "* List\n\n code block with spaces\n",
+ "\nList
\n\n code block with spaces\n
\n \n",
+
+ "* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
+ "\nList
\n\n\n\nnormal text
\n\n \n \n",
+
+ `* Foo
+
+ bar
+
+ qux
+`,
+ `
+`,
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestFencedCodeBlockWithinList(t *testing.T) {
+ doTestsBlock(t, []string{
+ "* Foo\n\n ```\n bar\n\n qux\n ```\n",
+ `
+`,
+ }, EXTENSION_FENCED_CODE)
+}
+
+func TestOrderedList(t *testing.T) {
+ var tests = []string{
+ "1. Hello\n",
+ "\nHello \n \n",
+
+ "1. Yin\n2. Yang\n",
+ "\nYin \nYang \n \n",
+
+ "1. Ting\n2. Bong\n3. Goo\n",
+ "\nTing \nBong \nGoo \n \n",
+
+ "1. Yin\n\n2. Yang\n",
+ "\nYin
\n\nYang
\n \n",
+
+ "1. Ting\n\n2. Bong\n3. Goo\n",
+ "\nTing
\n\nBong
\n\nGoo
\n \n",
+
+ "1 Hello\n",
+ "1 Hello
\n",
+
+ "1.Hello\n",
+ "1.Hello
\n",
+
+ "1. Hello \n",
+ "\nHello \n \n",
+
+ "1. Hello \n Next line \n",
+ "\nHello\nNext line \n \n",
+
+ "Paragraph\n1. No linebreak\n",
+ "Paragraph\n1. No linebreak
\n",
+
+ "Paragraph\n\n1. Linebreak\n",
+ "Paragraph
\n\n\nLinebreak \n \n",
+
+ "1. List\n 1. Nested list\n",
+ "\nList\n\n\nNested list \n \n \n",
+
+ "1. List\n\n 1. Nested list\n",
+ "\nList
\n\n\nNested list \n \n \n",
+
+ "1. List\n Second line\n\n 1. Nested\n",
+ "\nList\nSecond line
\n\n\nNested \n \n \n",
+
+ "1. List\n 1. Nested\n\n Continued\n",
+ "\nList
\n\n\nNested \n \n\nContinued
\n \n",
+
+ "1. List\n 1. shallow indent\n",
+ "\nList\n\n\nshallow indent \n \n \n",
+
+ "1. List\n" +
+ " 1. shallow indent\n" +
+ " 2. part of second list\n" +
+ " 3. still second\n" +
+ " 4. almost there\n" +
+ " 1. third level\n",
+ "\n" +
+ "List\n\n" +
+ "\n" +
+ "shallow indent \n" +
+ "part of second list \n" +
+ "still second \n" +
+ "almost there\n\n" +
+ "\n" +
+ "third level \n" +
+ " \n" +
+ " \n" +
+ " \n",
+
+ "1. List\n extra indent, same paragraph\n",
+ "\nList\n extra indent, same paragraph \n \n",
+
+ "1. List\n\n code block\n",
+ "\nList
\n\ncode block\n
\n \n",
+
+ "1. List\n\n code block with spaces\n",
+ "\nList
\n\n code block with spaces\n
\n \n",
+
+ "1. List\n * Mixted list\n",
+ "\nList\n\n \n \n",
+
+ "1. List\n * Mixed list\n",
+ "\nList\n\n \n \n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\nStart with unordered\n\n\nOrdered \n \n \n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "\nStart with unordered\n\n\nOrdered \n \n \n",
+
+ "1. numbers\n1. are ignored\n",
+ "\nnumbers \nare ignored \n \n",
+
+ `1. Foo
+
+ bar
+
+
+
+ qux
+`,
+ `
+Foo
+
+bar
+
+
+
+qux
+
+
+`,
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestDefinitionList(t *testing.T) {
+ var tests = []string{
+ "Term 1\n: Definition a\n",
+ "\nTerm 1 \nDefinition a \n \n",
+
+ "Term 1\n: Definition a \n",
+ "\nTerm 1 \nDefinition a \n \n",
+
+ "Term 1\n: Definition a\n: Definition b\n",
+ "\nTerm 1 \nDefinition a \nDefinition b \n \n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a \n" +
+ "Term 2 \n" +
+ "Definition b \n" +
+ " \n",
+
+ "Term 1\n: Definition a\n\nTerm 2\n: Definition b\n\nTerm 3\n: Definition c\n",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a \n" +
+ "Term 2 \n" +
+ "Definition b \n" +
+ "Term 3 \n" +
+ "Definition c \n" +
+ " \n",
+
+ "Term 1\n: Definition a\n: Definition b\n\nTerm 2\n: Definition c\n",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a \n" +
+ "Definition b \n" +
+ "Term 2 \n" +
+ "Definition c \n" +
+ " \n",
+
+ "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a
\n" +
+ "Term 2 \n" +
+ "Definition b
\n" +
+ " \n",
+
+ "Term 1\n\n: Definition a\n\n: Definition b\n\nTerm 2\n\n: Definition c\n",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a
\n" +
+ "Definition b
\n" +
+ "Term 2 \n" +
+ "Definition c
\n" +
+ " \n",
+
+ "Term 1\n: Definition a\nNext line\n",
+ "\nTerm 1 \nDefinition a\nNext line \n \n",
+
+ "Term 1\n: Definition a\n Next line\n",
+ "\nTerm 1 \nDefinition a\nNext line \n \n",
+
+ "Term 1\n: Definition a \n Next line \n",
+ "\nTerm 1 \nDefinition a\nNext line \n \n",
+
+ "Term 1\n: Definition a\nNext line\n\nTerm 2\n: Definition b",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a\nNext line \n" +
+ "Term 2 \n" +
+ "Definition b \n" +
+ " \n",
+
+ "Term 1\n: Definition a\n",
+ "\nTerm 1 \nDefinition a \n \n",
+
+ "Term 1\n:Definition a\n",
+ "Term 1\n:Definition a
\n",
+
+ "Term 1\n\n: Definition a\n\nTerm 2\n\n: Definition b\n\nText 1",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a
\n" +
+ "Term 2 \n" +
+ "Definition b
\n" +
+ " \n" +
+ "\nText 1
\n",
+
+ "Term 1\n\n: Definition a\n\nText 1\n\nTerm 2\n\n: Definition b\n\nText 2",
+ "\n" +
+ "Term 1 \n" +
+ "Definition a
\n" +
+ " \n" +
+ "\nText 1
\n" +
+ "\n\n" +
+ "Term 2 \n" +
+ "Definition b
\n" +
+ " \n" +
+ "\nText 2
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_DEFINITION_LISTS)
+}
+
+func TestPreformattedHtml(t *testing.T) {
+ var tests = []string{
+ "
\n",
+ "
\n",
+
+ "\n
\n",
+ "\n
\n",
+
+ "\n
\nParagraph\n",
+ "
\n
\nParagraph
\n",
+
+ "\n
\n",
+ "\n
\n",
+
+ "\nAnything here\n
\n",
+ "\nAnything here\n
\n",
+
+ "\n Anything here\n
\n",
+ "\n Anything here\n
\n",
+
+ "\nAnything here\n
\n",
+ "\nAnything here\n
\n",
+
+ "\nThis is *not* &proceessed\n
\n",
+ "\nThis is *not* &proceessed\n
\n",
+
+ "\n Something\n \n",
+ "\n Something\n
\n",
+
+ "\n Something here\n\n",
+ "
\n Something here\n\n",
+
+ "Paragraph\n
\nHere? >&<\n
\n",
+ "
Paragraph\n
\nHere? >&<\n
\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\n",
+
+ "Paragraph\n
\nHere? >&<\n
\nAnd here?\n",
+ "
Paragraph\n
\nHere? >&<\n
\nAnd here?\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\nAnd here?\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\nAnd here?\n",
+
+ "Paragraph\n
\nHere? >&<\n
\n\nAnd here?\n",
+ "
Paragraph\n
\nHere? >&<\n
\n\n
And here?
\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\n\nAnd here?\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\n\n
And here?
\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestPreformattedHtmlLax(t *testing.T) {
+ var tests = []string{
+ "Paragraph\n
\nHere? >&<\n
\n",
+ "
Paragraph
\n\n
\nHere? >&<\n
\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\n",
+
+ "Paragraph\n
\nHere? >&<\n
\nAnd here?\n",
+ "
Paragraph
\n\n
\nHere? >&<\n
\n\n
And here?
\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\nAnd here?\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\n\n
And here?
\n",
+
+ "Paragraph\n
\nHere? >&<\n
\n\nAnd here?\n",
+ "
Paragraph
\n\n
\nHere? >&<\n
\n\n
And here?
\n",
+
+ "Paragraph\n\n
\nHow about here? >&<\n
\n\nAnd here?\n",
+ "
Paragraph
\n\n
\nHow about here? >&<\n
\n\n
And here?
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS)
+}
+
+func TestFencedCodeBlock(t *testing.T) {
+ var tests = []string{
+ "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n",
+ "
func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "
/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "
no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "
No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "
language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "
with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "
with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "
~ ~~ java\nWith whitespace\n~~~
\n",
+
+ "~~\nonly two\n~~\n",
+ "
~~\nonly two\n~~
\n",
+
+ "```` python\nextra\n````\n",
+ "
extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "
~~~ perl\nthree to start, four to end\n~~~~
\n",
+
+ "~~~~ perl\nfour to start, three to end\n~~~\n",
+ "
~~~~ perl\nfour to start, three to end\n~~~
\n",
+
+ "~~~ bash\ntildes\n~~~\n",
+ "
tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "
``` lisp\nno ending
\n",
+
+ "~~~ lisp\nend with language\n~~~ lisp\n",
+ "
~~~ lisp\nend with language\n~~~ lisp
\n",
+
+ "```\nmismatched begin and end\n~~~\n",
+ "
```\nmismatched begin and end\n~~~
\n",
+
+ "~~~\nmismatched begin and end\n```\n",
+ "
~~~\nmismatched begin and end\n```
\n",
+
+ " ``` oz\nleading spaces\n```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
``` oz\n
\n\n
leading spaces\n ```
\n",
+
+ "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n",
+ "
Bla bla
\n\n
code blocks breakup paragraphs\n
\n\n
Bla Bla
\n",
+
+ "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nAnd some text after a fenced code block",
+ "
Some text before a fenced code block
\n\n
code blocks breakup paragraphs\n
\n\n
And some text after a fenced code block
\n",
+
+ "`",
+ "
`
\n",
+
+ "Bla bla\n\n``` oz\ncode blocks breakup paragraphs\n```\n\nBla Bla\n\n``` oz\nmultiple code blocks work okay\n```\n\nBla Bla\n",
+ "
Bla bla
\n\n
code blocks breakup paragraphs\n
\n\n
Bla Bla
\n\n
multiple code blocks work okay\n
\n\n
Bla Bla
\n",
+
+ "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block",
+ "
Some text before a fenced code block
\n\n
code blocks breakup paragraphs\n
\n\n
Some text in between
\n\n
multiple code blocks work okay\n
\n\n
And some text after a fenced code block
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE)
+}
+
+func TestFencedCodeInsideBlockquotes(t *testing.T) {
+ cat := func(s ...string) string { return strings.Join(s, "\n") }
+ var tests = []string{
+ cat("> ```go",
+ "package moo",
+ "",
+ "```",
+ ""),
+ `
+package moo
+
+
+
+`,
+ // -------------------------------------------
+ cat("> foo",
+ "> ",
+ "> ```go",
+ "package moo",
+ "```",
+ "> ",
+ "> goo.",
+ ""),
+ `
+foo
+
+package moo
+
+
+goo.
+
+`,
+ // -------------------------------------------
+ cat("> foo",
+ "> ",
+ "> quote",
+ "continues",
+ "```",
+ ""),
+ `
+foo
+
+quote
+continues
+` + "```" + `
+
+`,
+ // -------------------------------------------
+ cat("> foo",
+ "> ",
+ "> ```go",
+ "package moo",
+ "```",
+ "> ",
+ "> goo.",
+ "> ",
+ "> ```go",
+ "package zoo",
+ "```",
+ "> ",
+ "> woo.",
+ ""),
+ `
+foo
+
+package moo
+
+
+goo.
+
+package zoo
+
+
+woo.
+
+`,
+ }
+
+ // These 2 alternative forms of blockquoted fenced code blocks should produce same output.
+ forms := [2]string{
+ cat("> plain quoted text",
+ "> ```fenced",
+ "code",
+ " with leading single space correctly preserved",
+ "okay",
+ "```",
+ "> rest of quoted text"),
+ cat("> plain quoted text",
+ "> ```fenced",
+ "> code",
+ "> with leading single space correctly preserved",
+ "> okay",
+ "> ```",
+ "> rest of quoted text"),
+ }
+ want := `
+plain quoted text
+
+code
+ with leading single space correctly preserved
+okay
+
+
+rest of quoted text
+
+`
+ tests = append(tests, forms[0], want)
+ tests = append(tests, forms[1], want)
+
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE)
+}
+
+func TestTable(t *testing.T) {
+ var tests = []string{
+ "a | b\n---|---\nc | d\n",
+ "
\n\n\na \nb \n \n \n\n" +
+ "\n\nc \nd \n \n \n
\n",
+
+ "a | b\n---|--\nc | d\n",
+ "
a | b\n---|--\nc | d
\n",
+
+ "|a|b|c|d|\n|----|----|----|---|\n|e|f|g|h|\n",
+ "
\n\n\na \nb \nc \nd \n \n \n\n" +
+ "\n\ne \nf \ng \nh \n \n \n
\n",
+
+ "*a*|__b__|[c](C)|d\n---|---|---|---\ne|f|g|h\n",
+ "
\n\n\na \nb \nc \nd \n \n \n\n" +
+ "\n\ne \nf \ng \nh \n \n \n
\n",
+
+ "a|b|c\n---|---|---\nd|e|f\ng|h\ni|j|k|l|m\nn|o|p\n",
+ "
\n\n\na \nb \nc \n \n \n\n" +
+ "\n\nd \ne \nf \n \n\n" +
+ "\ng \nh \n \n \n\n" +
+ "\ni \nj \nk \n \n\n" +
+ "\nn \no \np \n \n \n
\n",
+
+ "a|b|c\n---|---|---\n*d*|__e__|f\n",
+ "
\n\n\na \nb \nc \n \n \n\n" +
+ "\n\nd \ne \nf \n \n \n
\n",
+
+ "a|b|c|d\n:--|--:|:-:|---\ne|f|g|h\n",
+ "
\n\n\na \nb \n" +
+ "c \nd \n \n \n\n" +
+ "\n\ne \nf \n" +
+ "g \nh \n \n \n
\n",
+
+ "a|b|c\n---|---|---\n",
+ "
\n\n\na \nb \nc \n \n \n\n\n \n
\n",
+
+ "a| b|c | d | e\n---|---|---|---|---\nf| g|h | i |j\n",
+ "
\n\n\na \nb \nc \nd \ne \n \n \n\n" +
+ "\n\nf \ng \nh \ni \nj \n \n \n
\n",
+
+ "a|b\\|c|d\n---|---|---\nf|g\\|h|i\n",
+ "
\n\n\na \nb|c \nd \n \n \n\n\n\nf \ng|h \ni \n \n \n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_TABLES)
+}
+
+func TestUnorderedListWith_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
+ var tests = []string{
+ "* Hello\n",
+ "
\n",
+
+ "* Yin\n* Yang\n",
+ "
\n",
+
+ "* Ting\n* Bong\n* Goo\n",
+ "
\n",
+
+ "* Yin\n\n* Yang\n",
+ "
\n",
+
+ "* Ting\n\n* Bong\n* Goo\n",
+ "
\n",
+
+ "+ Hello\n",
+ "
\n",
+
+ "+ Yin\n+ Yang\n",
+ "
\n",
+
+ "+ Ting\n+ Bong\n+ Goo\n",
+ "
\n",
+
+ "+ Yin\n\n+ Yang\n",
+ "
\n",
+
+ "+ Ting\n\n+ Bong\n+ Goo\n",
+ "
\n",
+
+ "- Hello\n",
+ "
\n",
+
+ "- Yin\n- Yang\n",
+ "
\n",
+
+ "- Ting\n- Bong\n- Goo\n",
+ "
\n",
+
+ "- Yin\n\n- Yang\n",
+ "
\n",
+
+ "- Ting\n\n- Bong\n- Goo\n",
+ "
\n",
+
+ "*Hello\n",
+ "
*Hello
\n",
+
+ "* Hello \n",
+ "
\n",
+
+ "* Hello \n Next line \n",
+ "
\n",
+
+ "Paragraph\n* No linebreak\n",
+ "
Paragraph
\n\n
\n",
+
+ "Paragraph\n\n* Linebreak\n",
+ "
Paragraph
\n\n
\n",
+
+ "* List\n * Nested list\n",
+ "
\n",
+
+ "* List\n\n * Nested list\n",
+ "
\n",
+
+ "* List\n Second line\n\n + Nested\n",
+ "
\nList\nSecond line
\n\n \n \n",
+
+ "* List\n + Nested\n\n Continued\n",
+ "
\nList
\n\n\n\nContinued
\n \n",
+
+ "* List\n * shallow indent\n",
+ "
\n",
+
+ "* List\n" +
+ " * shallow indent\n" +
+ " * part of second list\n" +
+ " * still second\n" +
+ " * almost there\n" +
+ " * third level\n",
+ "
\n" +
+ "List\n\n" +
+ "\n" +
+ "shallow indent \n" +
+ "part of second list \n" +
+ "still second \n" +
+ "almost there\n\n" +
+ "\n" +
+ "third level \n" +
+ " \n" +
+ " \n" +
+ " \n",
+
+ "* List\n extra indent, same paragraph\n",
+ "
\nList\n extra indent, same paragraph \n \n",
+
+ "* List\n\n code block\n",
+ "
\n",
+
+ "* List\n\n code block with spaces\n",
+ "
\nList
\n\n code block with spaces\n
\n \n",
+
+ "* List\n\n * sublist\n\n normal text\n\n * another sublist\n",
+ "
\nList
\n\n\n\nnormal text
\n\n \n \n",
+ }
+ doTestsBlock(t, tests, EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+}
+
+func TestOrderedList_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
+ var tests = []string{
+ "1. Hello\n",
+ "
\nHello \n \n",
+
+ "1. Yin\n2. Yang\n",
+ "
\nYin \nYang \n \n",
+
+ "1. Ting\n2. Bong\n3. Goo\n",
+ "
\nTing \nBong \nGoo \n \n",
+
+ "1. Yin\n\n2. Yang\n",
+ "
\nYin
\n\nYang
\n \n",
+
+ "1. Ting\n\n2. Bong\n3. Goo\n",
+ "
\nTing
\n\nBong
\n\nGoo
\n \n",
+
+ "1 Hello\n",
+ "
1 Hello
\n",
+
+ "1.Hello\n",
+ "
1.Hello
\n",
+
+ "1. Hello \n",
+ "
\nHello \n \n",
+
+ "1. Hello \n Next line \n",
+ "
\nHello\nNext line \n \n",
+
+ "Paragraph\n1. No linebreak\n",
+ "
Paragraph
\n\n
\nNo linebreak \n \n",
+
+ "Paragraph\n\n1. Linebreak\n",
+ "
Paragraph
\n\n
\nLinebreak \n \n",
+
+ "1. List\n 1. Nested list\n",
+ "
\nList\n\n\nNested list \n \n \n",
+
+ "1. List\n\n 1. Nested list\n",
+ "
\nList
\n\n\nNested list \n \n \n",
+
+ "1. List\n Second line\n\n 1. Nested\n",
+ "
\nList\nSecond line
\n\n\nNested \n \n \n",
+
+ "1. List\n 1. Nested\n\n Continued\n",
+ "
\nList
\n\n\nNested \n \n\nContinued
\n \n",
+
+ "1. List\n 1. shallow indent\n",
+ "
\nList\n\n\nshallow indent \n \n \n",
+
+ "1. List\n" +
+ " 1. shallow indent\n" +
+ " 2. part of second list\n" +
+ " 3. still second\n" +
+ " 4. almost there\n" +
+ " 1. third level\n",
+ "
\n" +
+ "List\n\n" +
+ "\n" +
+ "shallow indent \n" +
+ "part of second list \n" +
+ "still second \n" +
+ "almost there\n\n" +
+ "\n" +
+ "third level \n" +
+ " \n" +
+ " \n" +
+ " \n",
+
+ "1. List\n extra indent, same paragraph\n",
+ "
\nList\n extra indent, same paragraph \n \n",
+
+ "1. List\n\n code block\n",
+ "
\nList
\n\ncode block\n
\n \n",
+
+ "1. List\n\n code block with spaces\n",
+ "
\nList
\n\n code block with spaces\n
\n \n",
+
+ "1. List\n * Mixted list\n",
+ "
\nList\n\n \n \n",
+
+ "1. List\n * Mixed list\n",
+ "
\nList\n\n \n \n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "
\nStart with unordered\n\n\nOrdered \n \n \n",
+
+ "* Start with unordered\n 1. Ordered\n",
+ "
\nStart with unordered\n\n\nOrdered \n \n \n",
+
+ "1. numbers\n1. are ignored\n",
+ "
\nnumbers \nare ignored \n \n",
+ }
+ doTestsBlock(t, tests, EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+}
+
+func TestFencedCodeBlock_EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK(t *testing.T) {
+ var tests = []string{
+ "``` go\nfunc foo() bool {\n\treturn true;\n}\n```\n",
+ "
func foo() bool {\n\treturn true;\n}\n
\n",
+
+ "``` c\n/* special & char < > \" escaping */\n```\n",
+ "
/* special & char < > " escaping */\n
\n",
+
+ "``` c\nno *inline* processing ~~of text~~\n```\n",
+ "
no *inline* processing ~~of text~~\n
\n",
+
+ "```\nNo language\n```\n",
+ "
No language\n
\n",
+
+ "``` {ocaml}\nlanguage in braces\n```\n",
+ "
language in braces\n
\n",
+
+ "``` {ocaml} \nwith extra whitespace\n```\n",
+ "
with extra whitespace\n
\n",
+
+ "```{ ocaml }\nwith extra whitespace\n```\n",
+ "
with extra whitespace\n
\n",
+
+ "~ ~~ java\nWith whitespace\n~~~\n",
+ "
~ ~~ java\nWith whitespace\n~~~
\n",
+
+ "~~\nonly two\n~~\n",
+ "
~~\nonly two\n~~
\n",
+
+ "```` python\nextra\n````\n",
+ "
extra\n
\n",
+
+ "~~~ perl\nthree to start, four to end\n~~~~\n",
+ "
~~~ perl\nthree to start, four to end\n~~~~
\n",
+
+ "~~~~ perl\nfour to start, three to end\n~~~\n",
+ "
~~~~ perl\nfour to start, three to end\n~~~
\n",
+
+ "~~~ bash\ntildes\n~~~\n",
+ "
tildes\n
\n",
+
+ "``` lisp\nno ending\n",
+ "
``` lisp\nno ending
\n",
+
+ "~~~ lisp\nend with language\n~~~ lisp\n",
+ "
~~~ lisp\nend with language\n~~~ lisp
\n",
+
+ "```\nmismatched begin and end\n~~~\n",
+ "
```\nmismatched begin and end\n~~~
\n",
+
+ "~~~\nmismatched begin and end\n```\n",
+ "
~~~\nmismatched begin and end\n```
\n",
+
+ " ``` oz\nleading spaces\n```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ "``` oz\nleading spaces\n ```\n",
+ "
leading spaces\n
\n",
+
+ " ``` oz\nleading spaces\n ```\n",
+ "
``` oz\n
\n\n
leading spaces
\n\n
```\n
\n",
+ }
+ doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+}
+
+func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
+ var tests = []string{
+ "% Some title\n" +
+ "% Another title line\n" +
+ "% Yep, more here too\n",
+ "
" +
+ "Some title\n" +
+ "Another title line\n" +
+ "Yep, more here too\n" +
+ " ",
+ }
+ doTestsBlock(t, tests, EXTENSION_TITLEBLOCK)
+}
+
+func TestBlockComments(t *testing.T) {
+ var tests = []string{
+ "Some text\n\n\n",
+ "
Some text
\n\n\n",
+
+ "Some text\n\n\n",
+ "
Some text
\n\n\n",
+
+ "Some text\n\n\n",
+ "
Some text
\n\n\n",
+ }
+ doTestsBlock(t, tests, 0)
+}
+
+func TestCDATA(t *testing.T) {
+ var tests = []string{
+ "Some text\n\n\n",
+ "
Some text
\n\n\n",
+
+ "CDATA ]]\n\n\n",
+ "
CDATA ]]
\n\n\n",
+
+ "CDATA >\n\n]]>\n",
+ "
CDATA >
\n\n]]>\n",
+
+ "Lots of text\n\n
\n",
+ "Lots of text
\n\n\n",
+
+ "]]>\n",
+ "]]>\n",
+ }
+ doTestsBlock(t, tests, 0)
+ doTestsBlock(t, []string{
+ "``` html\n\n```\n",
+ "<![CDATA[foo]]>\n
\n",
+
+ "\n",
+ "\n",
+
+ ` def func():
+> pass
+]]>
+`,
+ ` def func():
+> pass
+]]>
+`,
+ }, EXTENSION_FENCED_CODE)
+}
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 0000000000..74e67ee82b
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,949 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross .
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Html renderer configuration options.
+const (
+ HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
+ HTML_SKIP_STYLE // skip embedded
+
+
+
+HTTP charset
+
+
+
+
+
+
+
+
+
+
+
+
+
The character encoding of a page can be set using the HTTP header charset declaration.
+
The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector .test div.ÜÀÚ
. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.
The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.
+
+
+
+
+
+
+